src/share/vm/runtime/sweeper.cpp
*** old/src/share/vm/runtime/sweeper.cpp	Fri Nov  8 14:10:12 2013
--- new/src/share/vm/runtime/sweeper.cpp	Fri Nov  8 14:10:12 2013

*** 110,153 **** --- 110,155 ----

  void NMethodSweeper::record_sweep(nmethod* nm, int line) {
    if (_records != NULL) {
      _records[_sweep_index].traversal = _traversals;
      _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
-     _records[_sweep_index].invocation = _invocations;
+     _records[_sweep_index].invocation = _sweep_fractions_left;
      _records[_sweep_index].compile_id = nm->compile_id();
      _records[_sweep_index].kind = nm->compile_kind();
      _records[_sweep_index].state = nm->_state;
      _records[_sweep_index].vep = nm->verified_entry_point();
      _records[_sweep_index].uep = nm->entry_point();
      _records[_sweep_index].line = line;
      _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
    }
  }
  #else
  #define SWEEP(nm)
  #endif

  nmethod*  NMethodSweeper::_current = NULL;          // Current nmethod
- long      NMethodSweeper::_traversals = 0;          // Nof. stack traversals performed
- int       NMethodSweeper::_seen = 0;                // Nof. nmethods we have currently processed in current pass of CodeCache
+ long      NMethodSweeper::_traversals = 0;          // Stack scan count, also sweep ID.
+ long      NMethodSweeper::_time_counter = 0;        // Virtual time used to periodically invoke sweeper
+ long      NMethodSweeper::_last_sweep = 0;          // Value of _time_counter when the last sweep happened
+ int       NMethodSweeper::_seen = 0;                // Nof. nmethods we have currently processed in current pass of CodeCache
  int       NMethodSweeper::_flushed_count = 0;       // Nof. nmethods flushed in current sweep
  int       NMethodSweeper::_zombified_count = 0;     // Nof. nmethods made zombie in current sweep
- int       NMethodSweeper::_marked_count = 0;        // Nof. nmethods marked for reclaim in current sweep
- volatile int NMethodSweeper::_invocations = 0;      // Nof. invocations left until we are completed with this pass
- volatile int NMethodSweeper::_sweep_started = 0;    // Whether a sweep is in progress.
+ int       NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep
  jint      NMethodSweeper::_locked_seen = 0;
  jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
  bool      NMethodSweeper::_request_mark_phase = false;
- int       NMethodSweeper::_total_nof_methods_reclaimed = 0;
- jlong     NMethodSweeper::_total_time_sweeping = 0;
- jlong     NMethodSweeper::_total_time_this_sweep = 0;
- jlong     NMethodSweeper::_peak_sweep_time = 0;
- jlong     NMethodSweeper::_peak_sweep_fraction_time = 0;
+ volatile bool NMethodSweeper::_should_sweep = true;      // Indicates if we should invoke the sweeper
+ volatile int  NMethodSweeper::_sweep_fractions_left = 0; // Nof. invocations left until we are completed with this pass
+ volatile int  NMethodSweeper::_sweep_started = 0;        // Flag to control conc sweeper
+ volatile int  NMethodSweeper::_bytes_changed = 0;        // Counts the total nmethod size if the nmethod changed from:
+                                                          //   1) alive       -> not_entrant
+                                                          //   2) not_entrant -> zombie
+                                                          //   3) zombie      -> marked_for_reclamation
+
+ int       NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed
+ jlong     NMethodSweeper::_total_time_sweeping = 0;         // Accumulated time sweeping
+ jlong     NMethodSweeper::_total_time_this_sweep = 0;       // Total time this sweep
+ jlong     NMethodSweeper::_peak_sweep_time = 0;             // Peak time for a full sweep
+ jlong     NMethodSweeper::_peak_sweep_fraction_time = 0;    // Peak time sweeping one fraction
  int       NMethodSweeper::_hotness_counter_reset_val = 0;

  class MarkActivationClosure: public CodeBlobClosure {
   public:
*** 195,237 **** --- 197,272 ----

    // to scan stacks
    if (!MethodFlushing) {
      return;
    }

+   // Increase time so that we can estimate when to invoke the sweeper again.
+   _time_counter++;
+
    // Check for restart
    assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
-
    if (!sweep_in_progress() && need_marking_phase()) {
      _seen = 0;
-     _invocations = NmethodSweepFraction;
+     _sweep_fractions_left = NmethodSweepFraction;
      _current = CodeCache::first_nmethod();
      _traversals += 1;
      _total_time_this_sweep = 0;
      if (PrintMethodFlushing) {
        tty->print_cr("### Sweep: stack traversal %d", _traversals);
      }
      Threads::nmethods_do(&mark_activation_closure);

      // reset the flags since we started a scan from the beginning.
      reset_nmethod_marking();
      _locked_seen = 0;
      _not_entrant_seen_on_stack = 0;
    } else {
      // Only set hotness counter
      Threads::nmethods_do(&set_hotness_closure);
    }

    OrderAccess::storestore();
  }

+ /**
+  * This function invokes the sweeper if at least one of the three conditions is met:
+  *    (1) The code cache is getting full
+  *    (2) There have been sufficient state changes since the last sweep.
+  *    (3) We have not been sweeping for 'some time'
+  */
  void NMethodSweeper::possibly_sweep() {
    assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
    if (!MethodFlushing || !sweep_in_progress()) {
      return;
    }

-   if (_invocations > 0) {
+   // If there was no state change while nmethod sweeping, 'should_sweep' will be false.
+   // This is one of the two places where should_sweep can be set to true. The general
+   // idea is as follows: If there is enough free space in the code cache, there is no
+   // need to invoke the sweeper. The following formula (which determines whether to invoke
+   // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes
+   // we need less frequent sweeps than for smaller ReservedCodeCacheSizes. Furthermore,
+   // the formula considers how much space in the code cache is currently used. Here are
+   // some examples that will (hopefully) help in understanding.
+   //
+   // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since
+   //                               the result of the division is 0. This keeps the used
+   //                               code cache size small (important for embedded Java)
+   // Large ReservedCodeCacheSize:  (e.g., 256M + code cache is 10% full). The formula
+   //                               computes: (256 / 16) - 1 = 15
+   //                               As a result, we invoke the sweeper after 15 invocations
+   //                               of 'mark_active_nmethods()'.
+   // Large ReservedCodeCacheSize:  (e.g., 256M + code cache is 90% full). The formula
+   //                               computes: (256 / 16) - 10 = 6.
+   if (!_should_sweep) {
+     int time_since_last_sweep = _time_counter - _last_sweep;
+     int wait_until_next_sweep = (ReservedCodeCacheSize / (16 * M)) - time_since_last_sweep -
+                                 CodeCache::reverse_free_ratio();
+
+     if ((wait_until_next_sweep <= 0) || !CompileBroker::should_compile_new_jobs()) {
+       _should_sweep = true;
+     }
+   }
+
+   if (_should_sweep && _sweep_fractions_left > 0) {
      // Only one thread at a time will sweep
      jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
      if (old != 0) {
        return;
      }
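The worked examples in the sweep-interval comment above can be checked with a small standalone program. The sketch below is illustrative only and not part of the patch: the constant `M`, the parameter names, and the integer model of `CodeCache::reverse_free_ratio()` (max capacity divided by free capacity, truncated) are assumptions made for the example.

    // Standalone sketch reproducing the worked examples from the comment above.
    #include <cstdio>

    static const long M = 1024 * 1024;

    // Rough integer model of CodeCache::reverse_free_ratio() for a cache that
    // is 'used_percent' percent full: 100 / (100 - used_percent).
    static int reverse_free_ratio(int used_percent) {
      return 100 / (100 - used_percent);
    }

    static int wait_until_next_sweep(long reserved_code_cache_size,
                                     int time_since_last_sweep,
                                     int used_percent) {
      return (int)(reserved_code_cache_size / (16 * M))
             - time_since_last_sweep
             - reverse_free_ratio(used_percent);
    }

    int main() {
      // 256M cache, 10% full, no virtual time elapsed: (256 / 16) - 0 - 1 = 15
      printf("%d\n", wait_until_next_sweep(256 * M, 0, 10)); // prints 15
      // 256M cache, 90% full: (256 / 16) - 0 - 10 = 6
      printf("%d\n", wait_until_next_sweep(256 * M, 0, 90)); // prints 6
      // 8M cache: 8 / 16 == 0, so the result is never positive and the
      // sweeper is invoked every time.
      printf("%d\n", wait_until_next_sweep(8 * M, 0, 10));   // prints -1
      return 0;
    }

A non-positive result arms the sweeper immediately, so small caches are swept on every `mark_active_nmethods()` call while large, mostly empty caches wait many virtual time ticks.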
*** 240,282 **** --- 275,332 ----

      // Create the ring buffer for the logging code
      _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
      memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
    }
  #endif

-   if (_invocations > 0) {
+   if (_sweep_fractions_left > 0) {
      sweep_code_cache();
-     _invocations--;
+     _sweep_fractions_left--;
    }
+
+   // We are done with sweeping the code cache once.
+   if (_sweep_fractions_left == 0) {
+     _last_sweep = _time_counter;
+     // Reset flag; temporarily disables sweeper
+     _should_sweep = false;
+     // If there was enough state change, 'possibly_enable_sweeper()'
+     // sets '_should_sweep' to true
+     possibly_enable_sweeper();
+     // Reset _bytes_changed only if there was enough state change. _bytes_changed
+     // can further increase by calls to 'report_state_change'.
+     if (_should_sweep) {
+       _bytes_changed = 0;
+     }
+   }
    _sweep_started = 0;
  }
}

void NMethodSweeper::sweep_code_cache() {
  jlong sweep_start_counter = os::elapsed_counter();

  _flushed_count = 0;
  _zombified_count = 0;
- _marked_count = 0;
+ _marked_for_reclamation_count = 0;

  if (PrintMethodFlushing && Verbose) {
-   tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
+   tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
  }

  if (!CompileBroker::should_compile_new_jobs()) {
    // If we have turned off compilations we might as well do full sweeps
    // in order to reach the clean state faster. Otherwise the sleeping compiler
    // threads will slow down sweeping.
-   _invocations = 1;
+   _sweep_fractions_left = 1;
  }

  // We want to visit all nmethods after NmethodSweepFraction
  // invocations so divide the remaining number of nmethods by the
  // remaining number of invocations. This is only an estimate since
  // the number of nmethods changes during the sweep so the final
  // stage must iterate until there are no more nmethods.
- int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
+ int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
  int swept_count = 0;

  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  assert(!CodeCache_lock->owned_by_self(), "just checking");
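The `todo` computation above spreads one full pass over the code cache across `NmethodSweepFraction` invocations, with the last fraction picking up whatever remains. The following sketch simulates that partitioning; the nmethod count of 3000 is made up, and the fraction of 16 is the documented default of `NmethodSweepFraction`, both assumptions for illustration only.

    // Illustration of how "todo" spreads one code cache pass over
    // NmethodSweepFraction sweeper invocations.
    #include <cstdio>

    int main() {
      const int nof_nmethods = 3000;  // assumed snapshot; changes during a real sweep
      const int sweep_fraction = 16;  // default NmethodSweepFraction
      int seen = 0;
      for (int fractions_left = sweep_fraction; fractions_left > 0; fractions_left--) {
        // Same estimate as in sweep_code_cache(): remaining work divided by
        // the remaining number of invocations.
        int todo = (nof_nmethods - seen) / fractions_left;
        seen += todo;
        printf("fraction %2d: sweeping %d nmethods (seen so far: %d)\n",
               sweep_fraction - fractions_left + 1, todo, seen);
      }
      // The final iteration (fractions_left == 1) takes everything left over,
      // mirroring the "_sweep_fractions_left == 1" loop condition in the patch.
      return 0;
    }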
*** 284,298 **** --- 334,348 ----

    int freed_memory = 0;
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

      // The last invocation iterates until there are no more nmethods
-     for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
+     for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) {
        swept_count++;
        if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
          if (PrintMethodFlushing && Verbose) {
-           tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
+           tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
          }
          MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
          assert(Thread::current()->is_Java_thread(), "should be java thread");
          JavaThread* thread = (JavaThread*)Thread::current();
*** 312,334 **** --- 362,372 ----

        _seen++;
        _current = next;
      }
    }

-   assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
-
-   if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
-     // we've completed a scan without making progress but there were
-     // nmethods we were unable to process either because they were
-     // locked or were still on stack. We don't have to aggressively
-     // clean them up so just stop scanning. We could scan once more
-     // but that complicates the control logic and it's unlikely to
-     // matter much.
-     if (PrintMethodFlushing) {
-       tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
-     }
-   }
+   assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache");

    jlong sweep_end_counter = os::elapsed_counter();
    jlong sweep_time = sweep_end_counter - sweep_start_counter;
    _total_time_sweeping += sweep_time;
    _total_time_this_sweep += sweep_time;
*** 338,362 **** --- 376,400 ----

    EventSweepCodeCache event(UNTIMED);
    if (event.should_commit()) {
      event.set_starttime(sweep_start_counter);
      event.set_endtime(sweep_end_counter);
      event.set_sweepIndex(_traversals);
-     event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
+     event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
      event.set_sweptCount(swept_count);
      event.set_flushedCount(_flushed_count);
-     event.set_markedCount(_marked_count);
+     event.set_markedCount(_marked_for_reclamation_count);
      event.set_zombifiedCount(_zombified_count);
      event.commit();
    }

  #ifdef ASSERT
    if(PrintMethodFlushing) {
-     tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time);
+     tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time);
    }
  #endif

-   if (_invocations == 1) {
+   if (_sweep_fractions_left == 1) {
      _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
      log_sweep("finished");
    }

    // Sweeper is the only case where memory is released, check here if it
*** 366,378 **** --- 404,444 ----

    // cases when compilation was disabled although there is 4MB (or more) free
    // memory in the code cache. The reason is code cache fragmentation. Therefore,
    // it only makes sense to re-enable compilation if we have actually freed memory.
    // Note that typically several kB are released for sweeping 16MB of the code
    // cache. As a result, 'freed_memory' > 0 to restart the compiler.
!   if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0))) {
      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
      log_sweep("restart_compiler");
+     if (CompileBroker::should_print_compiler_warning()) {
+       warning("Freed %d bytes from code cache. Compiler has been enabled.", freed_memory);
+     }
    }
  }

+ /**
+  * This function updates the sweeper statistics that keep track of nmethod
+  * state changes. If there is 'enough' state change, the sweeper is invoked
+  * as soon as possible. There can be data races on _bytes_changed. The data
+  * races are benign, since it does not matter if we lose a couple of bytes.
+  * In the worst case we call the sweeper a little later. Also, we are guaranteed
+  * to invoke the sweeper if the code cache gets full.
+  */
+ void NMethodSweeper::report_state_change(nmethod* nm) {
+   _bytes_changed += nm->total_size();
+   possibly_enable_sweeper();
+ }
+
+ /**
+  * Function determines if there was 'enough' state change in the code cache to invoke
+  * the sweeper again. Currently, we determine 'enough' as more than 1% state change in
+  * the code cache since the last sweep.
+  */
+ void NMethodSweeper::possibly_enable_sweeper() {
+   double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100;
+   if (percent_changed > 1.0) {
+     _should_sweep = true;
+   }
+ }

  class NMethodMarker: public StackObj {
   private:
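The 1% threshold in `possibly_enable_sweeper()` translates to a concrete byte budget: with a 256M `ReservedCodeCacheSize`, a bit more than 2.6MB of nmethods must change state before the sweeper is re-armed. The sketch below is only an illustration of that arithmetic; the loop count and the 9K average nmethod size are invented values, not measurements from the patch.

    // Sketch of the 1% re-enable threshold described above.
    #include <cstdio>

    int main() {
      const long reserved_code_cache_size = 256L * 1024 * 1024;
      long bytes_changed = 0;
      bool should_sweep = false;

      // Each state change (e.g., alive -> not_entrant) reports the nmethod size,
      // as report_state_change() does in the patch.
      for (int i = 0; i < 300; i++) {
        bytes_changed += 9 * 1024;  // assumed average nmethod size of 9K
        double percent_changed = ((double)bytes_changed / reserved_code_cache_size) * 100;
        if (percent_changed > 1.0) {
          should_sweep = true;
          break;
        }
      }
      // Triggers once bytes_changed exceeds 1% of 256M (~2.68MB).
      printf("re-armed after %ld bytes changed (%s)\n",
             bytes_changed, should_sweep ? "yes" : "no");
      return 0;
    }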
*** 422,434 **** --- 488,497 ----

      if (nm->is_alive()) {
        // Clean inline caches that point to zombie/non-entrant methods
        MutexLocker cl(CompiledIC_lock);
        nm->cleanup_inline_caches();
        SWEEP(nm);
-     } else {
-       _locked_seen++;
-       SWEEP(nm);
      }
      return freed_memory;
    }

    if (nm->is_zombie()) {
*** 446,478 **** --- 509,538 ----

      } else {
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
        }
        nm->mark_for_reclamation();
        request_nmethod_marking();
-       _marked_count++;
+       // Keep track of code cache state change
+       _bytes_changed += nm->total_size();
+       _marked_for_reclamation_count++;
        SWEEP(nm);
      }
    } else if (nm->is_not_entrant()) {
      // If there are no current activations of this method on the
      // stack we can safely convert it to a zombie method
      if (nm->can_not_entrant_be_converted()) {
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
        }
+       // Code cache state change is tracked in make_zombie()
        nm->make_zombie();
        request_nmethod_marking();
        _zombified_count++;
        SWEEP(nm);
      } else {
        // Still alive, clean up its inline caches
        MutexLocker cl(CompiledIC_lock);
        nm->cleanup_inline_caches();
        // we couldn't transition this nmethod so don't immediately
        // request a rescan. If this method stays on the stack for a
        // long time we don't want to keep rescanning the code cache.
        _not_entrant_seen_on_stack++;
        SWEEP(nm);
      }
    } else if (nm->is_unloaded()) {
      // Unloaded code, just make it a zombie
      if (PrintMethodFlushing && Verbose) {
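The transitions handled in this hunk form a one-way lifecycle, and the first three steps are exactly the ones `_bytes_changed` accounts for. The sketch below (not HotSpot code; names and the single-step-per-sweep simplification are assumptions for illustration) mirrors the order in which `process_nmethod` advances an nmethod, which is why a cold method typically needs several sweeps before its memory is actually reclaimed.

    // Simplified model of the nmethod lifecycle the sweeper walks.
    #include <cstdio>

    enum State { alive, not_entrant, zombie, marked_for_reclamation, flushed };

    static State next_state(State s) {
      switch (s) {
        case alive:                  return not_entrant;            // hotness below threshold
        case not_entrant:            return zombie;                 // no activations on any stack
        case zombie:                 return marked_for_reclamation; // inline caches cleaned
        case marked_for_reclamation: return flushed;                // memory released
        default:                     return flushed;
      }
    }

    int main() {
      const char* names[] = { "alive", "not_entrant", "zombie",
                              "marked_for_reclamation", "flushed" };
      State s = alive;
      int sweeps = 0;
      while (s != flushed) {
        State n = next_state(s);
        printf("sweep %d: %s -> %s\n", ++sweeps, names[s], names[n]);
        s = n;
      }
      return 0;
    }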
*** 483,494 **** --- 543,554 ----

        // No inline caches will ever point to osr methods, so we can just remove it
        freed_memory = nm->total_size();
        release_nmethod(nm);
        _flushed_count++;
      } else {
+       // Code cache state change is tracked in make_zombie()
        nm->make_zombie();
        request_nmethod_marking();
        _zombified_count++;
        SWEEP(nm);
      }
    } else {
      if (UseCodeCacheFlushing) {
*** 512,522 **** --- 572,586 ----

        // The second condition is necessary if we are dealing with very small code cache
        // sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
        // The second condition ensures that methods are not immediately made not-entrant
        // after compilation.
        nm->make_not_entrant();
        request_nmethod_marking();
+       // Code cache state change is tracked in make_not_entrant()
+       if (PrintMethodFlushing && Verbose) {
+         tty->print_cr("### Nmethod %d/" PTR_FORMAT " made not-entrant: hotness counter %d/%d threshold %f",
+                       nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold);
+       }
      }
    }
  }

  // Clean-up all inline caches that point to zombie/non-reentrant methods
  MutexLocker cl(CompiledIC_lock);
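The threshold computation itself lies outside the lines shown in this webrev; the sketch below is therefore only a simplified model of the idea the comment and log message describe, with every constant invented for the example: a hotness counter starts at a reset value, decays on each sweep unless stack scanning saw the method, and the cutoff for making a method not-entrant rises as free space in the code cache shrinks.

    // Simplified, assumed model of hotness decay vs. a pressure-driven threshold.
    #include <cstdio>

    int main() {
      const int reset_val = 500;  // assumed hotness reset value
      int hotness = reset_val;
      for (int sweep = 1; sweep <= 40; sweep++) {
        hotness -= 25;  // assumed decay per sweep; a stack scan would reset it
        // Assumed pressure model: threshold grows as the code cache fills up.
        double used_fraction = 0.50 + sweep * 0.01;
        double threshold = -reset_val + (1.0 / (1.0 - used_fraction)) * 10.0;
        if (hotness < threshold) {
          printf("sweep %d: hotness %d < threshold %.1f -> make_not_entrant()\n",
                 sweep, hotness, threshold);
          break;
        }
      }
      return 0;
    }

Under this model a method that is never seen on a stack decays steadily, while the threshold only catches up once the cache is under real pressure, matching the comment's point that methods are not made not-entrant immediately after compilation.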
