src/share/vm/runtime/sweeper.cpp

rev 4203 : 8008208: Event tracing for code cache subsystems can give wrong timestamps
Reviewed-by:


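The listing below shows the affected hunks of sweeper.cpp twice, as webrev renders them: first the code before the change, then the patched code. The visible change is that the sweep and disconnect timing fields and locals move from long to jlong, with the tick counters now captured directly from os::elapsed_counter() at their point of use. A plausible reading of why this matters for 8008208 (an assumption here, not stated in the webrev itself) is that long is only 32 bits on LLP64 platforms such as 64-bit Windows, so storing the 64-bit tick values returned by os::elapsed_counter() in a long can truncate them and yield wrong event timestamps, whereas jlong is 64 bits everywhere. A minimal, self-contained sketch of that truncation hazard, using int32_t/int64_t stand-ins rather than HotSpot types:

    // Illustrative only -- not HotSpot code. narrow_t plays the role of a
    // 32-bit 'long' (as on 64-bit Windows); wide_t plays the role of 'jlong'.
    #include <cstdint>
    #include <cstdio>

    typedef int32_t narrow_t;   // 'long' on an LLP64 platform
    typedef int64_t wide_t;     // 'jlong' is 64 bits on every platform

    int main() {
      wide_t start = 5000000000LL;                 // hypothetical tick count > 2^31
      wide_t end   = start + 12345;                // hypothetical end of the sweep

      narrow_t truncated_start = (narrow_t)start;  // no longer fits: wrong timestamp
      wide_t   preserved_start = start;            // full 64-bit value is kept

      printf("truncated start = %d\n", (int)truncated_start);
      printf("preserved start = %lld, duration = %lld ticks\n",
             (long long)preserved_start, (long long)(end - start));
      return 0;
    }
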
 131 int       NMethodSweeper::_seen = 0;        // No. of nmethods processed so far in the current pass of the CodeCache
 132 int       NMethodSweeper::_flushed_count = 0;   // No. of nmethods flushed in current sweep
 133 int       NMethodSweeper::_zombified_count = 0; // No. of nmethods made zombie in current sweep
 134 int       NMethodSweeper::_marked_count = 0;    // No. of nmethods marked for reclaim in current sweep
 135 
 136 volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until we have completed this pass
 137 volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
 138 
 139 jint      NMethodSweeper::_locked_seen = 0;
 140 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
 141 bool      NMethodSweeper::_rescan = false;
 142 bool      NMethodSweeper::_do_sweep = false;
 143 bool      NMethodSweeper::_was_full = false;
 144 jint      NMethodSweeper::_advise_to_sweep = 0;
 145 jlong     NMethodSweeper::_last_was_full = 0;
 146 uint      NMethodSweeper::_highest_marked = 0;
 147 long      NMethodSweeper::_was_full_traversal = 0;
 148 
 149 int       NMethodSweeper::_number_of_flushes = 0; // Total number of full traversals caused by a full code cache
 150 int       NMethodSweeper::_total_nof_methods_reclaimed = 0;
 151 long      NMethodSweeper::_total_time_sweeping = 0;
 152 long      NMethodSweeper::_total_time_this_sweep = 0;
 153 long      NMethodSweeper::_peak_sweep_time = 0;
 154 long      NMethodSweeper::_peak_sweep_fraction_time = 0;
 155 long      NMethodSweeper::_total_disconnect_time = 0;
 156 long      NMethodSweeper::_peak_disconnect_time = 0;
 157 
 158 class MarkActivationClosure: public CodeBlobClosure {
 159 public:
 160   virtual void do_code_blob(CodeBlob* cb) {
 161     // If we see an activation belonging to a non_entrant nmethod, we mark it.
 162     if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
 163       ((nmethod*)cb)->mark_as_seen_on_stack();
 164     }
 165   }
 166 };
 167 static MarkActivationClosure mark_activation_closure;
 168 
 169 void NMethodSweeper::scan_stacks() {
 170   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
 171   if (!MethodFlushing) return;
 172   _do_sweep = true;
 173 
 174   // No need to synchronize access, since this is always executed at a
 175   // safepoint.  If we aren't in the middle of scan and a rescan
 176   // hasn't been requested then just return. If UseCodeCacheFlushing is on and


 240     if (old != 0) {
 241       return;
 242     }
 243 #ifdef ASSERT
 244     if (LogSweeper && _records == NULL) {
 245       // Create the ring buffer for the logging code
 246       _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
 247       memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
 248     }
 249 #endif
 250     if (_invocations > 0) {
 251       sweep_code_cache();
 252       _invocations--;
 253     }
 254     _sweep_started = 0;
 255   }
 256 }
 257 
 258 void NMethodSweeper::sweep_code_cache() {
 259 
 260   long sweep_start_counter, sweep_end_counter;
 261   long sweep_time;
 262   sweep_start_counter = os::elapsed_counter();
 263 
 264   _flushed_count   = 0;
 265   _zombified_count = 0;
 266   _marked_count    = 0;
 267 
 268   if (PrintMethodFlushing && Verbose) {
 269     tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
 270   }
 271 
 272   // We want to visit all nmethods after NmethodSweepFraction
 273   // invocations so divide the remaining number of nmethods by the
 274   // remaining number of invocations.  This is only an estimate since
 275   // the number of nmethods changes during the sweep so the final
 276   // stage must iterate until there are no more nmethods.
 277   int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
 278 
 279   assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
 280   assert(!CodeCache_lock->owned_by_self(), "just checking");
 281 
 282   {

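A quick worked example of the per-fraction workload computed just above (the numbers are invented for illustration): if NmethodSweepFraction leaves _invocations = 4 fractions still to run, CodeCache::nof_nmethods() = 2600 and _seen = 600, then todo = (2600 - 600) / 4 = 500 nmethods are targeted in this fraction. Because nmethods are created and flushed while the sweep is in progress, the estimate drifts, which is why the comment notes that the final fraction simply iterates until no nmethods remain.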

 307       }
 308       _seen++;
 309       _current = next;
 310     }
 311   }
 312 
 313   assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
 314 
 315   if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
 316     // we've completed a scan without making progress but there were
 317     // nmethods we were unable to process either because they were
 318     // locked or were still on stack.  We don't have to aggressively
 319     // clean them up so just stop scanning.  We could scan once more
 320     // but that complicates the control logic and it's unlikely to
 321     // matter much.
 322     if (PrintMethodFlushing) {
 323       tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
 324     }
 325   }
 326 
 327   sweep_end_counter = os::elapsed_counter();
 328   sweep_time = sweep_end_counter - sweep_start_counter;
 329   _total_time_sweeping  += sweep_time;
 330   _total_time_this_sweep += sweep_time;
 331   _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
 332   _total_nof_methods_reclaimed += _flushed_count;
 333 
 334   EventSweepCodeCache event(UNTIMED);
 335   if (event.should_commit()) {
 336     event.set_starttime(sweep_start_counter);
 337     event.set_endtime(sweep_end_counter);
 338     event.set_sweepIndex(_traversals);
 339     event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
 340     event.set_sweptCount(todo);
 341     event.set_flushedCount(_flushed_count);
 342     event.set_markedCount(_marked_count);
 343     event.set_zombifiedCount(_zombified_count);
 344     event.commit();
 345   }
 346 
 347 #ifdef ASSERT
 348   if (PrintMethodFlushing) {


 506     jlong curr_interval = now - _last_was_full;
 507     if (curr_interval < max_interval) {
 508       _rescan = true;
 509       log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
 510                            curr_interval/1000);
 511       return;
 512     }
 513   }
 514 
 515   VM_HandleFullCodeCache op(is_full);
 516   VMThread::execute(&op);
 517 
 518   // rescan again as soon as possible
 519   _rescan = true;
 520 }
 521 
 522 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
 523   // If there was a race in detecting full code cache, only run
 524   // one vm op for it or keep the compiler shut off
 525 
 526   long disconnect_start_counter;
 527   long disconnect_end_counter;
 528   long disconnect_time;
 529 
 530   if ((!was_full()) && (is_full)) {
 531     if (!CodeCache::needs_flushing()) {
 532       log_sweep("restart_compiler");
 533       CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
 534       return;
 535     }
 536   }
 537 
 538   disconnect_start_counter = os::elapsed_counter();
 539 
 540   // Traverse the code cache trying to dump the oldest nmethods
 541   uint curr_max_comp_id = CompileBroker::get_compilation_id();
 542   uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
 543   log_sweep("start_cleaning");
 544 
 545   nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
 546   jint disconnected = 0;
 547   jint made_not_entrant  = 0;
 548   while (nm != NULL) {
 549     uint curr_comp_id = nm->compile_id();
 550 
 551     // OSR methods cannot be flushed like this. Also, don't flush native methods
 552     // since they are part of the JDK in most cases
 553     if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
 554         (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {
 555 
 556       if ((nm->method()->code() == nm)) {
 557         // This method has not been previously considered for
 558         // unloading or it was restored already

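To make the flush threshold computed above concrete (again with invented numbers): if _highest_marked = 2000 and CompileBroker::get_compilation_id() currently returns 6000, then flush_target = ((6000 - 2000) >> 1) + 2000 = 4000, so only in-use nmethods whose compile_id falls below 4000, the older half of the range above the previous mark, are candidates for speculative disconnection.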

 566       }
 567 
 568       if (curr_comp_id > _highest_marked) {
 569         _highest_marked = curr_comp_id;
 570       }
 571     }
 572     nm = CodeCache::alive_nmethod(CodeCache::next(nm));
 573   }
 574 
 575   log_sweep("stop_cleaning",
 576                        "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
 577                        disconnected, made_not_entrant);
 578 
 579   // Shut off compiler. Sweeper will start over with a new stack scan and
 580   // traversal cycle and turn it back on if it clears enough space.
 581   if (was_full()) {
 582     _last_was_full = os::javaTimeMillis();
 583     CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
 584   }
 585 
 586   disconnect_end_counter = os::elapsed_counter();
 587   disconnect_time = disconnect_end_counter - disconnect_start_counter;
 588   _total_disconnect_time += disconnect_time;
 589   _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);
 590 
 591   EventCleanCodeCache event(UNTIMED);
 592   if (event.should_commit()) {
 593     event.set_starttime(disconnect_start_counter);
 594     event.set_endtime(disconnect_end_counter);
 595     event.set_disconnectedCount(disconnected);
 596     event.set_madeNonEntrantCount(made_not_entrant);
 597     event.commit();
 598   }
 599   _number_of_flushes++;
 600 
 601   // After two more traversals the sweeper will get rid of unrestored nmethods
 602   _was_full_traversal = _traversals;
 603 #ifdef ASSERT
 604 
 605   if (PrintMethodFlushing && Verbose) {
 606     tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
 607   }




 131 int       NMethodSweeper::_seen = 0;        // No. of nmethods processed so far in the current pass of the CodeCache
 132 int       NMethodSweeper::_flushed_count = 0;   // No. of nmethods flushed in current sweep
 133 int       NMethodSweeper::_zombified_count = 0; // No. of nmethods made zombie in current sweep
 134 int       NMethodSweeper::_marked_count = 0;    // No. of nmethods marked for reclaim in current sweep
 135 
 136 volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until we have completed this pass
 137 volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
 138 
 139 jint      NMethodSweeper::_locked_seen = 0;
 140 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
 141 bool      NMethodSweeper::_rescan = false;
 142 bool      NMethodSweeper::_do_sweep = false;
 143 bool      NMethodSweeper::_was_full = false;
 144 jint      NMethodSweeper::_advise_to_sweep = 0;
 145 jlong     NMethodSweeper::_last_was_full = 0;
 146 uint      NMethodSweeper::_highest_marked = 0;
 147 long      NMethodSweeper::_was_full_traversal = 0;
 148 
 149 int       NMethodSweeper::_number_of_flushes = 0; // Total number of full traversals caused by a full code cache
 150 int       NMethodSweeper::_total_nof_methods_reclaimed = 0;
 151 jlong     NMethodSweeper::_total_time_sweeping = 0;
 152 jlong     NMethodSweeper::_total_time_this_sweep = 0;
 153 jlong     NMethodSweeper::_peak_sweep_time = 0;
 154 jlong     NMethodSweeper::_peak_sweep_fraction_time = 0;
 155 jlong     NMethodSweeper::_total_disconnect_time = 0;
 156 jlong     NMethodSweeper::_peak_disconnect_time = 0;
 157 
 158 class MarkActivationClosure: public CodeBlobClosure {
 159 public:
 160   virtual void do_code_blob(CodeBlob* cb) {
 161     // If we see an activation belonging to a non_entrant nmethod, we mark it.
 162     if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
 163       ((nmethod*)cb)->mark_as_seen_on_stack();
 164     }
 165   }
 166 };
 167 static MarkActivationClosure mark_activation_closure;
 168 
 169 void NMethodSweeper::scan_stacks() {
 170   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
 171   if (!MethodFlushing) return;
 172   _do_sweep = true;
 173 
 174   // No need to synchronize access, since this is always executed at a
 175   // safepoint.  If we aren't in the middle of scan and a rescan
 176   // hasn't been requested then just return. If UseCodeCacheFlushing is on and


 240     if (old != 0) {
 241       return;
 242     }
 243 #ifdef ASSERT
 244     if (LogSweeper && _records == NULL) {
 245       // Create the ring buffer for the logging code
 246       _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
 247       memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
 248     }
 249 #endif
 250     if (_invocations > 0) {
 251       sweep_code_cache();
 252       _invocations--;
 253     }
 254     _sweep_started = 0;
 255   }
 256 }
 257 
 258 void NMethodSweeper::sweep_code_cache() {
 259 
 260   jlong sweep_start_counter = os::elapsed_counter();


 261 
 262   _flushed_count   = 0;
 263   _zombified_count = 0;
 264   _marked_count    = 0;
 265 
 266   if (PrintMethodFlushing && Verbose) {
 267     tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
 268   }
 269 
 270   // We want to visit all nmethods after NmethodSweepFraction
 271   // invocations so divide the remaining number of nmethods by the
 272   // remaining number of invocations.  This is only an estimate since
 273   // the number of nmethods changes during the sweep so the final
 274   // stage must iterate until there are no more nmethods.
 275   int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
 276 
 277   assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
 278   assert(!CodeCache_lock->owned_by_self(), "just checking");
 279 
 280   {


 305       }
 306       _seen++;
 307       _current = next;
 308     }
 309   }
 310 
 311   assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
 312 
 313   if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
 314     // we've completed a scan without making progress but there were
 315     // nmethods we were unable to process either because they were
 316     // locked or were still on stack.  We don't have to aggressively
 317     // clean them up so just stop scanning.  We could scan once more
 318     // but that complicates the control logic and it's unlikely to
 319     // matter much.
 320     if (PrintMethodFlushing) {
 321       tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
 322     }
 323   }
 324 
 325   jlong sweep_end_counter = os::elapsed_counter();
 326   jlong sweep_time = sweep_end_counter - sweep_start_counter;
 327   _total_time_sweeping  += sweep_time;
 328   _total_time_this_sweep += sweep_time;
 329   _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
 330   _total_nof_methods_reclaimed += _flushed_count;
 331 
 332   EventSweepCodeCache event(UNTIMED);
 333   if (event.should_commit()) {
 334     event.set_starttime(sweep_start_counter);
 335     event.set_endtime(sweep_end_counter);
 336     event.set_sweepIndex(_traversals);
 337     event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
 338     event.set_sweptCount(todo);
 339     event.set_flushedCount(_flushed_count);
 340     event.set_markedCount(_marked_count);
 341     event.set_zombifiedCount(_zombified_count);
 342     event.commit();
 343   }
 344 
 345 #ifdef ASSERT
 346   if (PrintMethodFlushing) {

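For readers unfamiliar with the tracing calls in the EventSweepCodeCache block above (and the EventCleanCodeCache block further down), the calling convention is: construct the event as UNTIMED, capture the start and end ticks yourself, and populate and commit the event only if should_commit() reports that the event is enabled. The sketch below imitates that shape with a hypothetical MockEvent class and a chrono-based stand-in for os::elapsed_counter(); it is not the HotSpot tracing API, only an illustration of the pattern visible in the listing.

    #include <chrono>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for an UNTIMED tracing event. Only the calling
    // convention (should_commit / set_starttime / set_endtime / commit)
    // mirrors the listing above; the body is invented for illustration.
    class MockEvent {
      int64_t _start, _end;
      bool    _enabled;
    public:
      explicit MockEvent(bool enabled = true) : _start(0), _end(0), _enabled(enabled) {}
      bool should_commit() const    { return _enabled; }
      void set_starttime(int64_t t) { _start = t; }
      void set_endtime(int64_t t)   { _end = t; }
      void commit() const {
        printf("event committed: start=%lld end=%lld\n",
               (long long)_start, (long long)_end);
      }
    };

    // Stand-in for os::elapsed_counter(): a monotonically increasing tick count.
    static int64_t elapsed_counter() {
      using namespace std::chrono;
      return duration_cast<nanoseconds>(
               steady_clock::now().time_since_epoch()).count();
    }

    int main() {
      int64_t start = elapsed_counter();   // jlong in the patched code
      // ... the sweep work would happen here ...
      int64_t end = elapsed_counter();

      MockEvent event;                     // cf. EventSweepCodeCache event(UNTIMED)
      if (event.should_commit()) {
        event.set_starttime(start);
        event.set_endtime(end);
        event.commit();
      }
      return 0;
    }
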

 504     jlong curr_interval = now - _last_was_full;
 505     if (curr_interval < max_interval) {
 506       _rescan = true;
 507       log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
 508                            curr_interval/1000);
 509       return;
 510     }
 511   }
 512 
 513   VM_HandleFullCodeCache op(is_full);
 514   VMThread::execute(&op);
 515 
 516   // rescan again as soon as possible
 517   _rescan = true;
 518 }
 519 
 520 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
 521   // If there was a race in detecting full code cache, only run
 522   // one vm op for it or keep the compiler shut off
 523 




 524   if ((!was_full()) && (is_full)) {
 525     if (!CodeCache::needs_flushing()) {
 526       log_sweep("restart_compiler");
 527       CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
 528       return;
 529     }
 530   }
 531 
 532   jlong disconnect_start_counter = os::elapsed_counter();
 533 
 534   // Traverse the code cache trying to dump the oldest nmethods
 535   uint curr_max_comp_id = CompileBroker::get_compilation_id();
 536   uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
 537   log_sweep("start_cleaning");
 538 
 539   nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
 540   jint disconnected = 0;
 541   jint made_not_entrant  = 0;
 542   while (nm != NULL) {
 543     uint curr_comp_id = nm->compile_id();
 544 
 545     // OSR methods cannot be flushed like this. Also, don't flush native methods
 546     // since they are part of the JDK in most cases
 547     if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
 548         (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {
 549 
 550       if ((nm->method()->code() == nm)) {
 551         // This method has not been previously considered for
 552         // unloading or it was restored already


 560       }
 561 
 562       if (curr_comp_id > _highest_marked) {
 563         _highest_marked = curr_comp_id;
 564       }
 565     }
 566     nm = CodeCache::alive_nmethod(CodeCache::next(nm));
 567   }
 568 
 569   log_sweep("stop_cleaning",
 570                        "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
 571                        disconnected, made_not_entrant);
 572 
 573   // Shut off compiler. Sweeper will start over with a new stack scan and
 574   // traversal cycle and turn it back on if it clears enough space.
 575   if (was_full()) {
 576     _last_was_full = os::javaTimeMillis();
 577     CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
 578   }
 579 
 580   jlong disconnect_end_counter = os::elapsed_counter();
 581   jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
 582   _total_disconnect_time += disconnect_time;
 583   _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);
 584 
 585   EventCleanCodeCache event(UNTIMED);
 586   if (event.should_commit()) {
 587     event.set_starttime(disconnect_start_counter);
 588     event.set_endtime(disconnect_end_counter);
 589     event.set_disconnectedCount(disconnected);
 590     event.set_madeNonEntrantCount(made_not_entrant);
 591     event.commit();
 592   }
 593   _number_of_flushes++;
 594 
 595   // After two more traversals the sweeper will get rid of unrestored nmethods
 596   _was_full_traversal = _traversals;
 597 #ifdef ASSERT
 598 
 599   if (PrintMethodFlushing && Verbose) {
 600     tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
 601   }