src/share/vm/runtime/sweeper.cpp

 123     _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
 124   }
 125 }
 126 #else
 127 #define SWEEP(nm)
 128 #endif
 129 
 130 
 131 long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
 132 nmethod*  NMethodSweeper::_current = NULL;   // Current nmethod
 133 int       NMethodSweeper::_seen = 0;         // No. of nmethods we have processed in the current pass of the CodeCache
 134 int       NMethodSweeper::_flushed_count = 0;   // Nof. nmethods flushed in current sweep
 135 int       NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
 136 int       NMethodSweeper::_marked_count = 0;    // Nof. nmethods marked for reclaim in current sweep
 137 
 138 volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until we are completed with this pass
 139 volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
 140 
 141 jint      NMethodSweeper::_locked_seen = 0;
 142 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
 143 bool      NMethodSweeper::_resweep = false;
 144 jint      NMethodSweeper::_flush_token = 0;
 145 jlong     NMethodSweeper::_last_full_flush_time = 0;
 146 int       NMethodSweeper::_highest_marked = 0;
 147 int       NMethodSweeper::_dead_compile_ids = 0;
 148 long      NMethodSweeper::_last_flush_traversal_id = 0;
 149 
 150 int       NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache
 151 int       NMethodSweeper::_total_nof_methods_reclaimed = 0;
 152 jlong     NMethodSweeper::_total_time_sweeping = 0;
 153 jlong     NMethodSweeper::_total_time_this_sweep = 0;
 154 jlong     NMethodSweeper::_peak_sweep_time = 0;
 155 jlong     NMethodSweeper::_peak_sweep_fraction_time = 0;
 156 jlong     NMethodSweeper::_total_disconnect_time = 0;
 157 jlong     NMethodSweeper::_peak_disconnect_time = 0;
 158 
 159 class MarkActivationClosure: public CodeBlobClosure {
 160 public:
 161   virtual void do_code_blob(CodeBlob* cb) {
 162     // If we see an activation belonging to a non_entrant nmethod, we mark it.
 163     if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
 164       ((nmethod*)cb)->mark_as_seen_on_stack();
 165     }
 166   }
 167 };
 168 static MarkActivationClosure mark_activation_closure;
 169 
 170 bool NMethodSweeper::sweep_in_progress() {
 171   return (_current != NULL);
 172 }
 173 
 174 void NMethodSweeper::scan_stacks() {
 175   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
 176   if (!MethodFlushing) return;
 177 
 178   // No need to synchronize access, since this is always executed at a
 179   // safepoint.
 180 
 181   // Make sure CompiledIC_lock is unlocked, since we might update some
 182   // inline caches. If it is locked, we just bail out and try later.
 183   if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
 184 
 185   // Check for restart
 186   assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
 187   if (!sweep_in_progress() && _resweep) {
 188     _seen        = 0;
 189     _invocations = NmethodSweepFraction;
 190     _current     = CodeCache::first_nmethod();
 191     _traversals  += 1;
 192     _total_time_this_sweep = 0;
 193 
 194     if (PrintMethodFlushing) {
 195       tty->print_cr("### Sweep: stack traversal %ld", _traversals);
 196     }
 197     Threads::nmethods_do(&mark_activation_closure);
 198 
 199     // reset the flags since we started a scan from the beginning.
 200     _resweep = false;
 201     _locked_seen = 0;
 202     _not_entrant_seen_on_stack = 0;
 203   }
 204 
 205   if (UseCodeCacheFlushing) {
 206     // only allow new flushes after the interval is complete.
 207     jlong now           = os::javaTimeMillis();
 208     jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
 209     jlong curr_interval = now - _last_full_flush_time;
 210     if (curr_interval > max_interval) {
 211       _flush_token = 0;
 212     }
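    // Illustrative numbers (assumed, not taken from this file): with
    // MinCodeCacheFlushingInterval at, say, 30, max_interval is
    // 30 * 1000 = 30,000 ms, so a new full flush is permitted at most
    // once every 30 seconds.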
 213 
 214     if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
 215       CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
 216       log_sweep("restart_compiler");
 217     }
 218   }
 219 }
 220 
 221 void NMethodSweeper::possibly_sweep() {
 222   assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
 223   if (!MethodFlushing || !sweep_in_progress()) return;
 224 
 225   if (_invocations > 0) {
 226     // Only one thread at a time will sweep
 227     jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
 228     if (old != 0) {
 229       return;
 230     }
 231 #ifdef ASSERT
 232     if (LogSweeper && _records == NULL) {
 233       // Create the ring buffer for the logging code
 234       _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
 235       memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
 236     }
 237 #endif
 238     if (_invocations > 0) {
 239       sweep_code_cache();
 240       _invocations--;
 241     }
 242     _sweep_started = 0;
 243   }
 244 }
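The Atomic::cmpxchg call above acts as a test-and-set latch: only the thread
that transitions _sweep_started from 0 to 1 gets to sweep; all others return at
once. A minimal standalone sketch of the same gating pattern, using standard
C++ atomics instead of HotSpot's Atomic class (the names here are illustrative
and not part of sweeper.cpp):

  #include <atomic>

  static std::atomic<int> sweep_started(0);  // 0 = idle, 1 = sweep in progress

  void possibly_do_work() {
    int expected = 0;
    // Analogue of Atomic::cmpxchg(1, &_sweep_started, 0): only the winner of
    // the 0 -> 1 race proceeds; everyone else bails out immediately.
    if (!sweep_started.compare_exchange_strong(expected, 1)) {
      return;
    }
    // ... the work that must be single-threaded goes here ...
    sweep_started.store(0);  // release the latch for the next round
  }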
 245 
 246 void NMethodSweeper::sweep_code_cache() {
 247 
 248   jlong sweep_start_counter = os::elapsed_counter();
 249 
 250   _flushed_count   = 0;
 251   _zombified_count = 0;
 252   _marked_count    = 0;
 253 
 254   if (PrintMethodFlushing && Verbose) {
 255     tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
 256   }
 257 
 258   if (!CompileBroker::should_compile_new_jobs()) {
 259     // If we have turned off compilations we might as well do full sweeps
 260     // in order to reach the clean state faster. Otherwise the sleeping compiler
 261     // threads will slow down sweeping. After a few iterations the cache
 262     // will be clean and sweeping stops (_resweep will not be set)
 263     _invocations = 1;
 264   }
 265 
 266   // We want to visit all nmethods after NmethodSweepFraction
 267   // invocations so divide the remaining number of nmethods by the
 268   // remaining number of invocations.  This is only an estimate since
 269   // the number of nmethods changes during the sweep so the final
 270   // stage must iterate until there are no more nmethods.
 271   int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
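  // Illustrative numbers (assumed): with 4000 nmethods in the cache,
  // _seen = 1000 and _invocations = 3, this pass handles
  // todo = (4000 - 1000) / 3 = 1000 nmethods.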
 272 
 273   assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
 274   assert(!CodeCache_lock->owned_by_self(), "just checking");
 275 
 276   {
 277     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 278 
 279     // The last invocation iterates until there are no more nmethods
 280     for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
 281       if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
 282         if (PrintMethodFlushing && Verbose) {
 283           tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
 284         }
 285         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 286 
 287         assert(Thread::current()->is_Java_thread(), "should be java thread");
 288         JavaThread* thread = (JavaThread*)Thread::current();
 289         ThreadBlockInVM tbivm(thread);
 290         thread->java_suspend_self();
 291       }
 292       // Since we will give up the CodeCache_lock, always skip ahead
 293       // to the next nmethod.  Other blobs can be deleted by other
 294       // threads but nmethods are only reclaimed by the sweeper.
 295       nmethod* next = CodeCache::next_nmethod(_current);
 296 
 297       // Now ready to process nmethod and give up CodeCache_lock
 298       {
 299         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 300         process_nmethod(_current);
 301       }
 302       _seen++;
 303       _current = next;
 304     }
 305   }
 306 
 307   assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
 308 
 309   if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
 310     // we've completed a scan without making progress but there were
 311     // nmethods we were unable to process either because they were
 312     // locked or were still on stack.  We don't have to aggressively
 313     // clean them up so just stop scanning.  We could scan once more
 314     // but that complicates the control logic and it's unlikely to
 315     // matter much.
 316     if (PrintMethodFlushing) {
 317       tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
 318     }
 319   }
 320 
 321   jlong sweep_end_counter = os::elapsed_counter();
 322   jlong sweep_time = sweep_end_counter - sweep_start_counter;
 323   _total_time_sweeping  += sweep_time;
 324   _total_time_this_sweep += sweep_time;
 325   _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
 326   _total_nof_methods_reclaimed += _flushed_count;
 327 
 328   EventSweepCodeCache event(UNTIMED);
 329   if (event.should_commit()) {
 330     event.set_starttime(sweep_start_counter);
 331     event.set_endtime(sweep_end_counter);
 332     event.set_sweepIndex(_traversals);
 375 
 376 void NMethodSweeper::release_nmethod(nmethod *nm) {
 377   // Clean up any CompiledICHolders
 378   {
 379     ResourceMark rm;
 380     MutexLocker ml_patch(CompiledIC_lock);
 381     RelocIterator iter(nm);
 382     while (iter.next()) {
 383       if (iter.type() == relocInfo::virtual_call_type) {
 384         CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
 385       }
 386     }
 387   }
 388 
 389   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 390   nm->flush();
 391 }
 392 
 393 void NMethodSweeper::process_nmethod(nmethod *nm) {
 394   assert(!CodeCache_lock->owned_by_self(), "just checking");
 395 
 396   // Make sure this nmethod doesn't get unloaded during the scan,
 397   // since the locks acquired below might safepoint.
 398   NMethodMarker nmm(nm);
 399 
 400   SWEEP(nm);
 401 
 402   // Skip methods that are currently referenced by the VM
 403   if (nm->is_locked_by_vm()) {
 404     // But still remember to clean-up inline caches for alive nmethods
 405     if (nm->is_alive()) {
 406       // Clean-up all inline caches that point to zombie/non-reentrant methods
 407       MutexLocker cl(CompiledIC_lock);
 408       nm->cleanup_inline_caches();
 409       SWEEP(nm);
 410     } else {
 411       _locked_seen++;
 412       SWEEP(nm);
 413     }
 414     return;
 415   }
 416 
 417   if (nm->is_zombie()) {
 418     // If it is the first time we see this nmethod, we mark it. Otherwise,
 419     // we reclaim it. When we have seen a zombie method twice, we know that
 420     // there are no inline caches that refer to it.
 421     if (nm->is_marked_for_reclamation()) {
 422       assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
 423       if (PrintMethodFlushing && Verbose) {
 424         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
 425       }
 426       release_nmethod(nm);
 427       _flushed_count++;
 428     } else {
 429       if (PrintMethodFlushing && Verbose) {
 430         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
 431       }
 432       nm->mark_for_reclamation();
 433       _resweep = true;
 434       _marked_count++;
 435       SWEEP(nm);
 436     }
 437   } else if (nm->is_not_entrant()) {
 438     // If there are no current activations of this method on the
 439     // stack we can safely convert it to a zombie method
 440     if (nm->can_not_entrant_be_converted()) {
 441       if (PrintMethodFlushing && Verbose) {
 442         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
 443       }
 444       nm->make_zombie();
 445       _resweep = true;
 446       _zombified_count++;
 447       SWEEP(nm);
 448     } else {
 449       // Still alive, clean up its inline caches
 450       MutexLocker cl(CompiledIC_lock);
 451       nm->cleanup_inline_caches();
 452       // we couldn't transition this nmethod so don't immediately
 453       // request a rescan.  If this method stays on the stack for a
 454       // long time we don't want to keep rescanning the code cache.
 455       _not_entrant_seen_on_stack++;
 456       SWEEP(nm);
 457     }
 458   } else if (nm->is_unloaded()) {
 459     // Unloaded code, just make it a zombie
 460     if (PrintMethodFlushing && Verbose)
 461       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
 462 
 463     if (nm->is_osr_method()) {
 464       SWEEP(nm);
 465       // No inline caches will ever point to osr methods, so we can just remove it
 466       release_nmethod(nm);
 467       _flushed_count++;
 468     } else {
 469       nm->make_zombie();
 470       _resweep = true;
 471       _zombified_count++;
 472       SWEEP(nm);
 473     }
 474   } else {
 475     assert(nm->is_alive(), "should be alive");
 476 
 477     if (UseCodeCacheFlushing) {
 478       if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
 479           (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
 480         // This method has not been called since the forced cleanup happened
 481         nm->make_not_entrant();
 482       }
 483     }
 484 
 485     // Clean-up all inline caches that point to zombie/non-reentrant methods
 486     MutexLocker cl(CompiledIC_lock);
 487     nm->cleanup_inline_caches();
 488     SWEEP(nm);
 489   }
 490 }
 491 
 492 // Code cache unloading: when compilers notice the code cache is getting full,
 493 // they will call a vm op that comes here. This code attempts to speculatively
 494 // unload the oldest half of the nmethods (based on the compile job id) by
 495 // saving the old code in a list in the CodeCache. Then
 496 // execution resumes. If a method so marked is not called by the second sweeper
 497 // stack traversal after the current one, the nmethod will be marked non-entrant and
 498 // reclaimed by normal sweeping. If the method is called, the Method*'s
 499 // _code field is restored and the Method*/nmethod
 500 // go back to their normal state.
 501 void NMethodSweeper::handle_full_code_cache(bool is_full) {
 502 
 503   if (is_full) {
 504     // Since code cache is full, immediately stop new compiles
 505     if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
 506       log_sweep("disable_compiler");
 507     }
 508   }
 509 
 510   // Make sure only one thread can flush
 511   // The token is reset after MinCodeCacheFlushingInterval in scan_stacks(),
 512   // no need to check the timeout here.
 513   jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
 514   if (old != 0) {
 515     return;
 516   }
 517 
 518   VM_HandleFullCodeCache op(is_full);
 519   VMThread::execute(&op);
 520 
 521   // resweep again as soon as possible
 522   _resweep = true;
 523 }
 524 
 525 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
 526   // If there was a race in detecting full code cache, only run
 527   // one vm op for it or keep the compiler shut off
 528 
 529   jlong disconnect_start_counter = os::elapsed_counter();
 530 
 531   // Traverse the code cache trying to dump the oldest nmethods
 532   int curr_max_comp_id = CompileBroker::get_compilation_id();
 533   int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
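  // Illustrative numbers (assumed): with curr_max_comp_id = 1000,
  // _dead_compile_ids = 200 and CodeCacheFlushingFraction = 2, the target is
  // ((1000 - 200) / 2) + 200 = 600, i.e. roughly the older half of the live
  // compile ids becomes eligible for disconnection.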
 534 
 535   log_sweep("start_cleaning");
 536 
 537   nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
 538   jint disconnected = 0;
 539   jint made_not_entrant  = 0;
 540   jint nmethod_count = 0;
 541 
 542   while (nm != NULL) {
 543     int curr_comp_id = nm->compile_id();
 544 
 545     // OSR methods cannot be flushed like this. Also, don't flush native methods
 546     // since they are part of the JDK in most cases
 547     if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {
 548 
 549       // only count methods that can be speculatively disconnected
 550       nmethod_count++;
 551 
 552       if (nm->is_in_use() && (curr_comp_id < flush_target)) {
 553         if (nm->method()->code() == nm) {
 554           // This method has not been previously considered for
 555           // unloading or it was restored already
 556           CodeCache::speculatively_disconnect(nm);
 557           disconnected++;
 558         } else if (nm->is_speculatively_disconnected()) {
 559           // This method was previously considered for preemptive unloading and has not been called since then
 560           CompilationPolicy::policy()->delay_compilation(nm->method());
 561           nm->make_not_entrant();
 562           made_not_entrant++;
 563         }
 564 
 565         if (curr_comp_id > _highest_marked) {
 566           _highest_marked = curr_comp_id;
 567         }
 568       }
 569     }
 570     nm = CodeCache::alive_nmethod(CodeCache::next(nm));
 571   }
 572 
 573   // Remember how many compile_ids weren't seen since the last flush.
 574   _dead_compile_ids = curr_max_comp_id - nmethod_count;
 575 
 576   log_sweep("stop_cleaning",
 577                        "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
 578                        disconnected, made_not_entrant);
 579 
 580   // Shut off compiler. Sweeper will start over with a new stack scan and
 581   // traversal cycle and turn it back on if it clears enough space.
 582   if (is_full) {
 583     _last_full_flush_time = os::javaTimeMillis();
 584   }
 585 
 586   jlong disconnect_end_counter = os::elapsed_counter();
 587   jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
 588   _total_disconnect_time += disconnect_time;
 589   _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);
 590 
 591   EventCleanCodeCache event(UNTIMED);
 592   if (event.should_commit()) {
 593     event.set_starttime(disconnect_start_counter);
 594     event.set_endtime(disconnect_end_counter);
 595     event.set_disconnectedCount(disconnected);
 596     event.set_madeNonEntrantCount(made_not_entrant);
 597     event.commit();
 598   }
 599   _number_of_flushes++;
 600 
 601   // After two more traversals the sweeper will get rid of unrestored nmethods
 602   _last_flush_traversal_id = _traversals;
 603   _resweep = true;
 604 #ifdef ASSERT
 605 
 606   if (PrintMethodFlushing && Verbose) {
 607     tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
 608   }
 609 #endif
 610 }
 611 
 612 
 613 // Print out some state information about the current sweep and the
 614 // state of the code cache if it's requested.
 615 void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
 616   if (PrintMethodFlushing) {
 617     stringStream s;
 618     // Dump code cache state into a buffer before locking the tty,
 619     // because log_state() will use locks causing lock conflicts.
 620     CodeCache::log_state(&s);
 621 
 622     ttyLocker ttyl;
 623     tty->print("### sweeper: %s ", msg);

 123     _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
 124   }
 125 }
 126 #else
 127 #define SWEEP(nm)
 128 #endif
 129 
 130 
 131 long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
 132 nmethod*  NMethodSweeper::_current = NULL;   // Current nmethod
 133 int       NMethodSweeper::_seen = 0;         // No. of nmethods we have processed in the current pass of the CodeCache
 134 int       NMethodSweeper::_flushed_count = 0;   // Nof. nmethods flushed in current sweep
 135 int       NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
 136 int       NMethodSweeper::_marked_count = 0;    // Nof. nmethods marked for reclaim in current sweep
 137 
 138 volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until we are completed with this pass
 139 volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
 140 
 141 jint      NMethodSweeper::_locked_seen = 0;
 142 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
 143 bool      NMethodSweeper::_request_mark_phase = false;
 144 jint      NMethodSweeper::_flush_token = 0;
 145 jlong     NMethodSweeper::_last_full_flush_time = 0;
 146 long      NMethodSweeper::_last_flush_traversal_id = 0;
 147 
 148 int       NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache
 149 int       NMethodSweeper::_total_nof_methods_reclaimed = 0;
 150 jlong     NMethodSweeper::_total_time_sweeping = 0;
 151 jlong     NMethodSweeper::_total_time_this_sweep = 0;
 152 jlong     NMethodSweeper::_peak_sweep_time = 0;
 153 jlong     NMethodSweeper::_peak_sweep_fraction_time = 0;
 154 jlong     NMethodSweeper::_total_disconnect_time = 0;
 155 jlong     NMethodSweeper::_peak_disconnect_time = 0;
 156 int       NMethodSweeper::_hotness_counter_reset_val = 0;
 157 
 158 enum { hotness_counter_decay = 1 };
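// With a decay of 1 per sweep, an nmethod whose hotness counter was reset to
// 2 * (ReservedCodeCacheSize / M) must miss that many consecutive sweeps
// (i.e., never show up on a Java stack) before its counter even reaches zero.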
 159 
 160 class MarkActivationClosure: public CodeBlobClosure {
 161 public:
 162   virtual void do_code_blob(CodeBlob* cb) {
 163     if (cb->is_nmethod()) {
 164       nmethod* nm = (nmethod*)cb;
 165       nm->set_hotness_counter(NMethodSweeper::get_hotness_counter_reset_val());
 166       // If we see an activation belonging to a non_entrant nmethod, we mark it.
 167       if (nm->is_not_entrant()) {
 168         nm->mark_as_seen_on_stack();
 169       }
 170     }
 171   }
 172 };
 173 static MarkActivationClosure mark_activation_closure;
 174 
 175 class SetHotnessClosure: public CodeBlobClosure {
 176 public:
 177   virtual void do_code_blob(CodeBlob* cb) {
 178     if (cb->is_nmethod()) {
 179       nmethod* nm = (nmethod*)cb;
 180       nm->set_hotness_counter(NMethodSweeper::get_hotness_counter_reset_val());
 181     }
 182   }
 183 };
 184 static SetHotnessClosure set_hotness_closure;
 185 
 186 
 187 int NMethodSweeper::get_hotness_counter_reset_val() {
 188   if (_hotness_counter_reset_val == 0) {
 189     _hotness_counter_reset_val = (ReservedCodeCacheSize / M) * 2;
 190   }
 191   return _hotness_counter_reset_val;
 192 }
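// Illustrative values (assumed): ReservedCodeCacheSize = 48M yields a reset
// value of (48M / M) * 2 = 96; a 256M cache yields 512. Larger caches thus
// let methods decay longer before they become flushing candidates.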
 193 bool NMethodSweeper::sweep_in_progress() {
 194   return (_current != NULL);
 195 }
 196 
 197 // Scans the stacks of all Java threads and marks activations of not-entrant methods.
 198 // No need to synchronize access, since 'mark_active_nmethods' is always executed at a
 199 // safepoint.
 200 void NMethodSweeper::mark_active_nmethods() {
 201   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
 202   // If we do not want to reclaim not-entrant or zombie methods, there is no need
 203   // to scan stacks
 204   if (!MethodFlushing) {
 205     return;
 206   }
 207 
 208   // Make sure CompiledIC_lock is unlocked, since we might update some
 209   // inline caches. If it is locked, we just bail out and try later.
 210   if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
 211 
 212 
 213   // Check for restart
 214   assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
 215   if (!sweep_in_progress() && need_marking_phase()) {
 216     _seen        = 0;
 217     _invocations = NmethodSweepFraction;
 218     _current     = CodeCache::first_nmethod();
 219     _traversals  += 1;
 220     _total_time_this_sweep = 0;
 221 
 222     if (PrintMethodFlushing) {
 223       tty->print_cr("### Sweep: stack traversal %ld", _traversals);
 224     }
 225     Threads::nmethods_do(&mark_activation_closure);
 226 
 227     // reset the flags since we started a scan from the beginning.
 228     reset_nmethod_marking();
 229     _locked_seen = 0;
 230     _not_entrant_seen_on_stack = 0;
 231   } else {
 232     // Only set the hotness counter
 233     Threads::nmethods_do(&set_hotness_closure);
 234   }
 235 
 236   if (UseCodeCacheFlushing) {
 237     // Only allow new flushes after the interval is complete.
 238     jlong now           = os::javaTimeMillis();
 239     jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
 240     jlong curr_interval = now - _last_full_flush_time;
 241     if (curr_interval > max_interval) {
 242       _flush_token = 0;
 243     }
 244   }
 245 }
 246 
 247 void NMethodSweeper::possibly_sweep() {
 248   assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
 249   if (!MethodFlushing || !sweep_in_progress()) {
 250     return;
 251   }
 252 
 253   if (_invocations > 0) {
 254     // Only one thread at a time will sweep
 255     jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
 256     if (old != 0) {
 257       return;
 258     }
 259 #ifdef ASSERT
 260     if (LogSweeper && _records == NULL) {
 261       // Create the ring buffer for the logging code
 262       _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
 263       memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
 264     }
 265 #endif
 266     if (_invocations > 0) {
 267       sweep_code_cache();
 268       _invocations--;
 269     }
 270     _sweep_started = 0;
 271   }
 272 }
 273 
 274 void NMethodSweeper::sweep_code_cache() {
 275 
 276   jlong sweep_start_counter = os::elapsed_counter();
 277 
 278   _flushed_count   = 0;
 279   _zombified_count = 0;
 280   _marked_count    = 0;
 281 
 282   if (PrintMethodFlushing && Verbose) {
 283     tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
 284   }
 285 
 286   if (!CompileBroker::should_compile_new_jobs()) {
 287     // If we have turned off compilations we might as well do full sweeps
 288     // in order to reach the clean state faster. Otherwise the sleeping compiler
 289     // threads will slow down sweeping.
 290     _invocations = 1;
 291   }
 292 
 293   // We want to visit all nmethods after NmethodSweepFraction
 294   // invocations so divide the remaining number of nmethods by the
 295   // remaining number of invocations.  This is only an estimate since
 296   // the number of nmethods changes during the sweep so the final
 297   // stage must iterate until it there are no more nmethods.
 298   int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
 299   assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
 300   assert(!CodeCache_lock->owned_by_self(), "just checking");
 301 
 302   {
 303     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 304 
 305     // The last invocation iterates until there are no more nmethods
 306     for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
 307       if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
 308         if (PrintMethodFlushing && Verbose) {
 309           tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
 310         }
 311         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 312 
 313         assert(Thread::current()->is_Java_thread(), "should be java thread");
 314         JavaThread* thread = (JavaThread*)Thread::current();
 315         ThreadBlockInVM tbivm(thread);
 316         thread->java_suspend_self();
 317       }
 318       // Since we will give up the CodeCache_lock, always skip ahead
 319       // to the next nmethod.  Other blobs can be deleted by other
 320       // threads but nmethods are only reclaimed by the sweeper.
 321       nmethod* next = CodeCache::next_nmethod(_current);
 322 
 323       // Now ready to process nmethod and give up CodeCache_lock
 324       {
 325         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 326         process_nmethod(_current);
 327       }
 328       _seen++;
 329       _current = next;
 330     }
 331   }
 332 
 333   assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
 334 
 335   if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
 336     // we've completed a scan without making progress but there were
 337     // nmethods we were unable to process either because they were
 338     // locked or were still on stack.  We don't have to aggressively
 339     // clean them up so just stop scanning. We could scan once more
 340     // but that complicates the control logic and it's unlikely to
 341     // matter much.
 342     if (PrintMethodFlushing) {
 343       tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
 344     }
 345   }
 346 
 347   jlong sweep_end_counter = os::elapsed_counter();
 348   jlong sweep_time = sweep_end_counter - sweep_start_counter;
 349   _total_time_sweeping  += sweep_time;
 350   _total_time_this_sweep += sweep_time;
 351   _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
 352   _total_nof_methods_reclaimed += _flushed_count;
 353 
 354   EventSweepCodeCache event(UNTIMED);
 355   if (event.should_commit()) {
 356     event.set_starttime(sweep_start_counter);
 357     event.set_endtime(sweep_end_counter);
 358     event.set_sweepIndex(_traversals);
 401 
 402 void NMethodSweeper::release_nmethod(nmethod *nm) {
 403   // Clean up any CompiledICHolders
 404   {
 405     ResourceMark rm;
 406     MutexLocker ml_patch(CompiledIC_lock);
 407     RelocIterator iter(nm);
 408     while (iter.next()) {
 409       if (iter.type() == relocInfo::virtual_call_type) {
 410         CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
 411       }
 412     }
 413   }
 414 
 415   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 416   nm->flush();
 417 }
 418 
 419 void NMethodSweeper::process_nmethod(nmethod *nm) {
 420   assert(!CodeCache_lock->owned_by_self(), "just checking");
 421   // Make sure this nmethod doesn't get unloaded during the scan,
 422   // since the locks acquired below might safepoint.
 423   NMethodMarker nmm(nm);
 424   SWEEP(nm);
 425 
 426   // Skip methods that are currently referenced by the VM
 427   if (nm->is_locked_by_vm()) {
 428     // But still remember to clean-up inline caches for alive nmethods
 429     if (nm->is_alive()) {
 430       // Clean-up all inline caches that point to zombie/non-reentrant methods
 431       MutexLocker cl(CompiledIC_lock);
 432       nm->cleanup_inline_caches();
 433       SWEEP(nm);
 434     } else {
 435       _locked_seen++;
 436       SWEEP(nm);
 437     }
 438     return;
 439   }
 440 
 441   if (nm->is_zombie()) {
 442     // If it is the first time we see nmethod then we mark it. Otherwise,
 443     // we reclaim it. When we have seen a zombie method twice, we know that
 444     // there are no inline caches that refer to it.
 445     if (nm->is_marked_for_reclamation()) {
 446       assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
 447       if (PrintMethodFlushing && Verbose) {
 448         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
 449       }
 450       release_nmethod(nm);
 451       _flushed_count++;
 452     } else {
 453       if (PrintMethodFlushing && Verbose) {
 454         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
 455       }
 456       nm->mark_for_reclamation();
 457       request_nmethod_marking();
 458       _marked_count++;
 459       SWEEP(nm);
 460     }
 461   } else if (nm->is_not_entrant()) {
 462     // If there are no current activations of this method on the
 463     // stack we can safely convert it to a zombie method
 464     if (nm->can_not_entrant_be_converted()) {
 465       if (PrintMethodFlushing && Verbose) {
 466         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
 467       }
 468       nm->make_zombie();
 469       request_nmethod_marking();
 470       _zombified_count++;
 471       SWEEP(nm);
 472     } else {
 473       // Still alive, clean up its inline caches
 474       MutexLocker cl(CompiledIC_lock);
 475       nm->cleanup_inline_caches();
 476       // we couldn't transition this nmethod so don't immediately
 477       // request a rescan.  If this method stays on the stack for a
 478       // long time we don't want to keep rescanning the code cache.
 479       _not_entrant_seen_on_stack++;
 480       SWEEP(nm);
 481     }
 482   } else if (nm->is_unloaded()) {
 483     // Unloaded code, just make it a zombie
 484     if (PrintMethodFlushing && Verbose)
 485       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
 486 
 487     if (nm->is_osr_method()) {
 488       SWEEP(nm);
 489       // No inline caches will ever point to osr methods, so we can just remove it
 490       release_nmethod(nm);
 491       _flushed_count++;
 492     } else {
 493       nm->make_zombie();
 494       request_nmethod_marking();
 495       _zombified_count++;
 496       SWEEP(nm);
 497     }
 498   } else {
 499     if (UseCodeCacheFlushing) {
 500       if (!nm->is_locked_by_vm() && !nm->is_osr_method()) {
 501         // Do not make native methods and OSR-methods not-entrant
 502         if (!nm->is_speculatively_disconnected() && !nm->is_native_method() && !nm->is_osr_method()) {
 503           nm->dec_hotness_counter(hotness_counter_decay);
 504           // This method is cold and the code cache fills up => get rid of it.
 505           int reset_val = get_hotness_counter_reset_val();
 506           int time_since_reset = reset_val - nm->get_hotness_counter();
 507           double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
 508           // A method is made not-entrant if
 509           // 1) it is 'old enough': nm->get_hotness_counter() < threshold
 510           // 2) it was in_use for a minimum amount of time: (time_since_reset > 10)
 511           //    The second condition matters for very small code cache sizes
 512           //    (e.g., <10m), where the code cache is too small to hold all hot methods.
 513           //    It ensures that methods are not made not-entrant
 514           //    immediately after compilation.
 515           if ((nm->get_hotness_counter() < threshold) && (time_since_reset > 10)) {
 516             nm->make_not_entrant();
 517             nm->set_hotness_counter(-reset_val);
 518             request_nmethod_marking();
 519           }
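          // Illustrative numbers (assumed, defaults may differ): with a 64M
          // reserved cache, reset_val = 128. If the cache is half full so that
          // CodeCache::reverse_free_ratio() == 2, and NmethodSweepActivity is
          // 10, then threshold = -128 + 2 * 10 = -108: only methods whose
          // counter has decayed below -108 (and that have already survived
          // more than 10 sweeps since their last reset) are made not-entrant.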
 520         } else if (nm->is_speculatively_disconnected() && (_traversals > _last_flush_traversal_id + 2)) {
 521           // This method has not been called since the forced cleanup happened
 522           nm->make_not_entrant();
 523           nm->set_hotness_counter(-get_hotness_counter_reset_val());
 524           request_nmethod_marking();
 525         }
 526       }
 527     }
 528     // Clean-up all inline caches that point to zombie/non-reentrant methods
 529     MutexLocker cl(CompiledIC_lock);
 530     nm->cleanup_inline_caches();
 531     SWEEP(nm);
 532   }
 533 }
 534 
 535 // Code cache unloading: when compilers notice the code cache is getting full,
 536 // they will call a vm op that comes here. This code attempts to speculatively
 537 // unload the coldest part of the nmethods by saving the cold code in a list in
 538 // the CodeCache. Then execution resumes. If a method so marked is not called by
 539 // the second sweeper stack traversal after the current one, the nmethod will be
 540 // marked non-entrant and reclaimed by normal sweeping. If the method is called,
 541 // the Method*'s _code field is restored and the Method*/nmethod go back to their
 542 // normal state.
 543 void NMethodSweeper::handle_full_code_cache(bool is_full) {
 544 
 545   if (is_full) {
 546     // Since code cache is full, immediately stop new compiles
 547     if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
 548       log_sweep("disable_compiler");
 549     }
 550   }
 551 
 552   // Make sure only one thread can flush
 553   // The token is reset after MinCodeCacheFlushingInterval in mark_active_nmethods(),
 554   // no need to check the timeout here.
 555   jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
 556   if (old != 0) {
 557     return;
 558   }
 559 
 560   VM_HandleFullCodeCache op(is_full);
 561   VMThread::execute(&op);
 562 
 563   // Do marking as soon as possible
 564   request_nmethod_marking();
 565 }
 566 
 567 int NMethodSweeper::sort_nmethods_by_hotness(nmethod** nm1, nmethod** nm2) {
 568   return (*nm1)->get_hotness_counter() - (*nm2)->get_hotness_counter();  // strcmp-style three-way compare, ascending
 569 }
 570 
 571 class NMethodBlock : public CHeapObj<mtInternal> {
 572  private:
 573   GrowableArray<nmethod*>* _nmethods;
 574   int                      _block_size;
 575   double                   _hotness;
 576 
 577  public:
 578   NMethodBlock() {
 579     _nmethods = new GrowableArray<nmethod*>();
 580     _hotness = 0;
 581     _block_size = 0;
 582   }
 583 
 584   void append(nmethod* nm) {
 585     _nmethods->append(nm);
 586     _block_size += nm->total_size();
 587   }
 588 
 589   int get_length() const {
 590     return _nmethods->length();
 591   }
 592 
 593   int get_size_in_bytes() const {
 594     return _block_size;
 595   }
 596 
 597   nmethod* at(int i) const {
 598     return _nmethods->at(i);
 599   }
 600 
 601   // Computes the size-weighted average hotness of an nmethod block
 602   void compute_hotness() {
 603     if (_block_size > 0) {
 604       _hotness = 0;  // recompute from scratch so repeated calls stay correct
 605       for (int i = 0; i < _nmethods->length(); i++) {
 606         _hotness += _nmethods->at(i)->total_size() * _nmethods->at(i)->get_hotness_counter();
 607       }
 608       _hotness /= get_size_in_bytes();
 609     }
 610   }
 611 
 612   double get_hotness() {
 613     compute_hotness();
 614     return _hotness;
 615   }
 616 };
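The class above is only a container; the flushing code below fills one block
until it crosses a size limit and then starts a new one. A minimal sketch of
that partitioning idea, with plain integers standing in for nmethod sizes
(illustrative only, not HotSpot code):

  #include <vector>

  // Greedily pack item sizes into blocks of roughly 'block_limit' bytes, the
  // same strategy speculative_disconnect_nmethods uses with 1M NMethodBlocks.
  std::vector<std::vector<int> > pack_blocks(const std::vector<int>& sizes,
                                             int block_limit) {
    std::vector<std::vector<int> > blocks(1);
    int current = 0;
    for (size_t i = 0; i < sizes.size(); i++) {
      blocks.back().push_back(sizes[i]);
      current += sizes[i];
      if (current > block_limit) {  // block full: open a fresh one
        blocks.push_back(std::vector<int>());
        current = 0;
      }
    }
    return blocks;
  }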
 617 
 618 static int sort_nmethod_blocks_by_hotness(NMethodBlock** b1, NMethodBlock** b2) {
 619   return ((*b1)->get_hotness() > (*b2)->get_hotness()) - ((*b1)->get_hotness() < (*b2)->get_hotness());  // three-way compare, coldest first
 620 }
 621 
 622 
 623 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
 624   // If there was a race in detecting full code cache, only run
 625   // one vm op for it or keep the compiler shut off
 626   jlong disconnect_start_counter = os::elapsed_counter();
 627 
 628   jint disconnected = 0;
 629   jint made_not_entrant  = 0;
 630   jint nmethod_count = 0;
 631 
 632   log_sweep("start_cleaning");
 633   {
 634     ResourceMark rm;
 635     nmethod* nm = CodeCache::first_nmethod();
 636 
 637     // The intention behind the flushing_fraction is that for smaller code cache
 638     // sizes a larger fraction of memory is speculatively disconnected than for
 639     // large code cache sizes. See the following examples:
 640     //    CodeCacheSize[mb]      memory flushed [mb]
 641     //                  256      40
 642     //                  128      25
 643     //                   64      16
 644     //                   32      10
 645     //                   16       6
 646     //                    8       4
 647     //
 648     // In addition, it is possible to increase the amount of memory that is flushed by
 649     // using 'CodeCacheFlushingMinimumPercentage'
 650     const double flushing_fraction = pow((double)(ReservedCodeCacheSize / M), -1.0 / 3.0) + CodeCacheFlushingMinimumPercentage / 100.0;
 651     const int memory_to_flush = (int)(ReservedCodeCacheSize * flushing_fraction);
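    // Spot check against the table above (illustrative): for a 64M cache,
    // pow(64, -1.0/3.0) = 0.25, so with the minimum-percentage flag at 0
    // about 64M * 0.25 = 16M of code is scheduled for flushing.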
 652     int memory_will_be_flushed = 0;
 653     // Put methods that can be speculatively disconnected into nmethod blocks.
 654     // Flushing whole blocks should help to reduce code cache fragmentation.
 655     GrowableArray<NMethodBlock*>* nmethod_blocks = new GrowableArray<NMethodBlock*>();
 656     NMethodBlock* nm_block = new NMethodBlock();
 657     nmethod_blocks->append(nm_block);
 658     const int nmethod_block_size = 1 * M;
 659 
 660     // See how many methods are in the process of being flushed
 661     while (nm != NULL) {
 662       // OSR methods cannot be flushed like this. Also, don't flush native methods
 663       // since they are part of the JDK in most cases
 664       if (nm->is_in_use()) {
 665         if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
 666           nm_block->append(nm);
 667           // This method was previously considered for preemptive unloading and has not been called since then
 668           if (nm->is_speculatively_disconnected()) {
 669             CompilationPolicy::policy()->delay_compilation(nm->method());
 670             nm->make_not_entrant();
 671             made_not_entrant++;
 672             memory_will_be_flushed += nm->total_size();
 673           }
 674         }
 675       // These checks ensure that we only add nmethods that can be removed from the code cache
 676       } else if (nm->is_not_entrant() || nm->is_zombie() || nm->is_unloaded()) {
 677         memory_will_be_flushed += nm->total_size();
 678         nm_block->append(nm);
 679       }
 680 
 681       if (nm_block->get_size_in_bytes() > nmethod_block_size) {
 682         nm_block = new NMethodBlock();
 683         nmethod_blocks->append(nm_block);
 684       }
 685       nm = CodeCache::next_nmethod(nm);
 686     }
 687 
 688     // Speculatively disconnect methods until we reach 'memory_to_flush'
 689     if (memory_will_be_flushed < memory_to_flush) {
 690       nmethod_blocks->sort(sort_nmethod_blocks_by_hotness);
 691       // Iterate over sorted array and speculatively disconnect these nmethods
 692       for (int block_idx = 0; block_idx < nmethod_blocks->length(); block_idx++) {
 693         nm_block = nmethod_blocks->at(block_idx);
 694         for (int nmethod_idx = 0; nmethod_idx < nm_block->get_length(); nmethod_idx++) {
 695           nm = nm_block->at(nmethod_idx);
 696           if ((nm->is_in_use()) && (nm->method()->code() == nm)) {
 697             CodeCache::speculatively_disconnect(nm);
 698             disconnected++;
 699           }
 700         }
 701         memory_will_be_flushed += nm_block->get_size_in_bytes();
 702         // Stop flushing
 703         if (memory_will_be_flushed >= memory_to_flush) {
 704           break;
 705         }
 706       }
 707     }
 708   } // End ResourceMark
 709 
 710   log_sweep("stop_cleaning",
 711             "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
 712             disconnected, made_not_entrant);
 713 
 714   // Shut off compiler. Sweeper will start over with a new stack scan and
 715   // traversal cycle and turn it back on if it clears enough space.
 716   if (is_full) {
 717     _last_full_flush_time = os::javaTimeMillis();
 718   }
 719 
 720   jlong disconnect_end_counter = os::elapsed_counter();
 721   jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
 722   _total_disconnect_time += disconnect_time;
 723   _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);
 724 
 725   EventCleanCodeCache event(UNTIMED);
 726   if (event.should_commit()) {
 727     event.set_starttime(disconnect_start_counter);
 728     event.set_endtime(disconnect_end_counter);
 729     event.set_disconnectedCount(disconnected);
 730     event.set_madeNonEntrantCount(made_not_entrant);
 731     event.commit();
 732   }
 733   _number_of_flushes++;
 734 
 735   // After two more traversals the sweeper will get rid of unrestored nmethods
 736   _last_flush_traversal_id = _traversals;
 737   request_nmethod_marking();
 738 #ifdef ASSERT
 739 
 740   if (PrintMethodFlushing && Verbose) {
 741     tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
 742   }
 743 #endif
 744 }
 745 
 746 
 747 // Print out some state information about the current sweep and the
 748 // state of the code cache if it's requested.
 749 void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
 750   if (PrintMethodFlushing) {
 751     stringStream s;
 752     // Dump code cache state into a buffer before locking the tty,
 753     // because log_state() will use locks causing lock conflicts.
 754     CodeCache::log_state(&s);
 755 
 756     ttyLocker ttyl;
 757     tty->print("### sweeper: %s ", msg);