src/share/vm/runtime/sweeper.cpp
rev 5099 : dummy
rev 5100 : dummy
@@ -141,12 +141,10 @@
jint NMethodSweeper::_locked_seen = 0;
jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
bool NMethodSweeper::_resweep = false;
jint NMethodSweeper::_flush_token = 0;
jlong NMethodSweeper::_last_full_flush_time = 0;
-int NMethodSweeper::_highest_marked = 0;
-int NMethodSweeper::_dead_compile_ids = 0;
long NMethodSweeper::_last_flush_traversal_id = 0;
int NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache
int NMethodSweeper::_total_nof_methods_reclaimed = 0;
jlong NMethodSweeper::_total_time_sweeping = 0;
@@ -157,13 +155,17 @@
jlong NMethodSweeper::_peak_disconnect_time = 0;
class MarkActivationClosure: public CodeBlobClosure {
public:
virtual void do_code_blob(CodeBlob* cb) {
+ if (cb->is_nmethod()) {
+ nmethod* nm = (nmethod*)cb;
+ nm->set_hotness_counter(NMethodSweeper::hc_reset_value);
// If we see an activation belonging to a non_entrant nmethod, we mark it.
- if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
- ((nmethod*)cb)->mark_as_seen_on_stack();
+ if (nm->is_not_entrant()) {
+ nm->mark_as_seen_on_stack();
+ }
}
}
};
static MarkActivationClosure mark_activation_closure;
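For orientation, the closure above now also drives the nmethod hotness bookkeeping: every activation seen during a stack scan resets the method's counter to NMethodSweeper::hc_reset_value, while each sweep (see process_nmethod below) decrements it by NMethodSweeper::hc_dec_value, so a large counter means "recently active". The standalone sketch below only models that reset/decrement pattern; the concrete counter values are assumptions, not taken from the patch.

#include <cstdio>

// Standalone model, not HotSpot code: one hotness counter per compiled method.
struct FakeNMethod {
  int hotness;
};

static const int kResetValue = 100;  // stands in for NMethodSweeper::hc_reset_value (assumed value)
static const int kDecValue   = 1;    // stands in for NMethodSweeper::hc_dec_value (assumed value)

// Mirrors MarkActivationClosure::do_code_blob(): an activation seen on a stack resets the counter.
void on_stack_scan(FakeNMethod* nm) { nm->hotness = kResetValue; }

// Mirrors the decrement in NMethodSweeper::process_nmethod(): every sweep cools the method down.
void on_sweep(FakeNMethod* nm) { nm->hotness -= kDecValue; }

int main() {
  FakeNMethod m = { kResetValue };
  for (int sweep = 1; sweep <= 5; sweep++) {
    on_sweep(&m);
    if (sweep == 3) {
      on_stack_scan(&m);  // the method shows up on a thread stack during the third scan
    }
    printf("after sweep %d: hotness = %d\n", sweep, m.hotness);
  }
  return 0;
}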
@@ -307,11 +309,11 @@
assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
// we've completed a scan without making progress but there were
// nmethods we were unable to process either because they were
- // locked or were still on stack. We don't have to aggresively
+ // locked or were still on stack. We don't have to aggressively
// clean them up so just stop scanning. We could scan once more
// but that complicates the control logic and it's unlikely to
// matter much.
if (PrintMethodFlushing) {
tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
@@ -390,22 +392,22 @@
nm->flush();
}
void NMethodSweeper::process_nmethod(nmethod *nm) {
assert(!CodeCache_lock->owned_by_self(), "just checking");
-
// Make sure this nmethod doesn't get unloaded during the scan,
- // since the locks acquired below might safepoint.
+ // since the locks acquired below might trigger a safepoint.
NMethodMarker nmm(nm);
+ nm->dec_hotness_counter(NMethodSweeper::hc_dec_value);
SWEEP(nm);
// Skip methods that are currently referenced by the VM
if (nm->is_locked_by_vm()) {
// But still remember to clean-up inline caches for alive nmethods
if (nm->is_alive()) {
- // Clean-up all inline caches that points to zombie/non-reentrant methods
+ // Clean-up all inline caches that point to zombie/non-reentrant methods
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
SWEEP(nm);
} else {
_locked_seen++;
@@ -413,12 +415,12 @@
}
return;
}
if (nm->is_zombie()) {
- // If it is first time, we see nmethod then we mark it. Otherwise,
- // we reclame it. When we have seen a zombie method twice, we know that
+ // If it is the first time we see this nmethod, we mark it. Otherwise,
+ // we reclaim it. When we have seen a zombie method twice, we know that
// there are no inline caches that refer to it.
if (nm->is_marked_for_reclamation()) {
assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
@@ -433,11 +435,11 @@
_resweep = true;
_marked_count++;
SWEEP(nm);
}
} else if (nm->is_not_entrant()) {
- // If there is no current activations of this method on the
+ // If there are no current activations of this method on the
// stack we can safely convert it to a zombie method
if (nm->can_not_entrant_be_converted()) {
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
}
@@ -471,30 +473,35 @@
_zombified_count++;
SWEEP(nm);
}
} else {
assert(nm->is_alive(), "should be alive");
-
if (UseCodeCacheFlushing) {
- if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
- (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
+ if (!nm->is_locked_by_vm() && !nm->is_osr_method()) {
+ if (!nm->is_speculatively_disconnected()) {
+ // This method is cold and the code cache is filling up => get rid of it.
+ double threshold = -100 + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
+ if (nm->get_hotness_counter() < threshold) {
+ nm->make_not_entrant();
+ }
+ } else if (_traversals > _last_flush_traversal_id + 2) {
// This method has not been called since the forced cleanup happened
nm->make_not_entrant();
}
}
-
+ }
// Clean-up all inline caches that points to zombie/non-reentrant methods
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
SWEEP(nm);
}
}
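The flushing condition above compares the hotness counter against -100 + CodeCache::reverse_free_ratio() * NmethodSweepActivity, so the bar for keeping a method rises as the code cache fills up. A small standalone calculation, using an assumed model of reverse_free_ratio() and an assumed NmethodSweepActivity value, illustrates how the threshold scales:

#include <cstdio>

// Illustrative stand-in for CodeCache::reverse_free_ratio(): assumed to grow as the
// code cache fills up (roughly capacity divided by unallocated space).
double reverse_free_ratio(double used_fraction) {
  return 1.0 / (1.0 - used_fraction);
}

int main() {
  const double sweep_activity = 10.0;  // stands in for NmethodSweepActivity (assumed value)
  const double used[] = { 0.50, 0.80, 0.90, 0.99 };
  for (int i = 0; i < 4; i++) {
    double threshold = -100.0 + reverse_free_ratio(used[i]) * sweep_activity;
    printf("code cache %2.0f%% full -> flush methods with hotness < %6.1f\n",
           used[i] * 100.0, threshold);
  }
  return 0;
}

With these assumed numbers, a half-full cache only flushes methods that have stayed cold for a long time, while a nearly full cache flushes almost everything that is not currently seen on a stack.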
// Code cache unloading: when compilers notice the code cache is getting full,
// they will call a vm op that comes here. This code attempts to speculatively
-// unload the oldest half of the nmethods (based on the compile job id) by
-// saving the old code in a list in the CodeCache. Then
+// unload the coldest fraction of the nmethods (the fraction is determined by
+// CodeCacheFlushingFraction) by saving the cold code in a list in the CodeCache. Then
// execution resumes. If a method so marked is not called by the second sweeper
// stack traversal after the current one, the nmethod will be marked non-entrant and
// got rid of by normal sweeping. If the method is called, the Method*'s
// _code field is restored and the Method*/nmethod
// go back to their normal state.
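In short, the policy is: compute how many nmethods the flushing fraction allows, then disconnect the coldest candidates first (see the hotness sort in the code below). The standalone sketch that follows models just that selection step; the flushing-fraction value and the use of std::vector are illustrative assumptions, not HotSpot code.

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  // Made-up hotness counters of live, flushable nmethods.
  std::vector<int> hotness;
  int values[] = { 42, -80, 7, -120, 65, -5, 90, -33 };
  for (int i = 0; i < 8; i++) hotness.push_back(values[i]);

  const size_t flushing_fraction = 4;                            // stands in for CodeCacheFlushingFraction (assumed value)
  size_t methods_to_flush = hotness.size() / flushing_fraction;  // 8 / 4 = 2

  std::sort(hotness.begin(), hotness.end());                     // coldest methods first

  printf("flushing %d of %d methods:", (int)methods_to_flush, (int)hotness.size());
  for (size_t i = 0; i < methods_to_flush; i++) {
    printf(" hotness=%d", hotness[i]);                           // the two coldest: -120 and -80
  }
  printf("\n");
  return 0;
}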
@@ -520,61 +527,75 @@
// resweep again as soon as possible
_resweep = true;
}
+int NMethodSweeper::sort_nmethod_by_hotness(nmethod** nm1, nmethod** nm2) {
+ // Ascending order of hotness, i.e., the coldest nmethods come first
+ return ((*nm1)->get_hotness_counter() - (*nm2)->get_hotness_counter());
+}
+
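The comparator follows the usual qsort convention of returning a negative/zero/positive value, which is what GrowableArray::sort is assumed to pass straight through to qsort; sorting ascending by hotness puts the coldest methods at the front of the array. A minimal standalone illustration of that ordering, with plain ints standing in for hotness counters:

#include <cstdio>
#include <cstdlib>

// qsort-style comparator: negative/zero/positive, ascending order, coldest values first.
static int by_hotness(const void* a, const void* b) {
  return *(const int*)a - *(const int*)b;
}

int main() {
  int hotness[] = { 37, -92, 4, -15, 88 };  // made-up hotness counters
  qsort(hotness, 5, sizeof(int), by_hotness);
  for (int i = 0; i < 5; i++) {
    printf("%d ", hotness[i]);              // prints: -92 -15 4 37 88
  }
  printf("\n");
  return 0;
}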
void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
// If there was a race in detecting full code cache, only run
// one vm op for it or keep the compiler shut off
-
jlong disconnect_start_counter = os::elapsed_counter();
- // Traverse the code cache trying to dump the oldest nmethods
- int curr_max_comp_id = CompileBroker::get_compilation_id();
- int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
-
- log_sweep("start_cleaning");
-
- nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
jint disconnected = 0;
jint made_not_entrant = 0;
jint nmethod_count = 0;
- while ((nm != NULL)){
- int curr_comp_id = nm->compile_id();
+ log_sweep("start_cleaning");
- // OSR methods cannot be flushed like this. Also, don't flush native methods
- // since they are part of the JDK in most cases
- if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {
+ {
+ ResourceMark rm;
+ GrowableArray<nmethod*>* live_methods = new GrowableArray<nmethod*>();
+ nmethod* nm = CodeCache::next_nmethod(CodeCache::first());
- // only count methods that can be speculatively disconnected
- nmethod_count++;
+ size_t methods_to_flush = CodeCache::nof_nmethods() / CodeCacheFlushingFraction;
+ size_t methods_will_be_flushed = 0;
+ size_t nmethods = 0;
- if (nm->is_in_use() && (curr_comp_id < flush_target)) {
- if ((nm->method()->code() == nm)) {
- // This method has not been previously considered for
- // unloading or it was restored already
- CodeCache::speculatively_disconnect(nm);
- disconnected++;
+ // See how many methods are already 'in flight', i.e., scheduled to be flushed
+ while ((nm != NULL) && (methods_will_be_flushed < methods_to_flush)) {
+ // OSR methods cannot be flushed like this. Also, don't flush native methods
+ // since they are part of the JDK in most cases
+ if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method() && nm->is_alive()) {
+ if ((nm->is_in_use()) && (nm->method()->code() == nm)) {
+ live_methods->append(nm);
} else if (nm->is_speculatively_disconnected()) {
// This method was previously considered for preemptive unloading and was not called since then
CompilationPolicy::policy()->delay_compilation(nm->method());
nm->make_not_entrant();
made_not_entrant++;
+ methods_will_be_flushed++;
+ } else {
+ methods_will_be_flushed++;
+ }
+ }
+ nm = CodeCache::next_nmethod(nm);
}
- if (curr_comp_id > _highest_marked) {
- _highest_marked = curr_comp_id;
+ // Speculatively disconnect methods until we reach 'methods_to_flush'
+ if (methods_will_be_flushed < methods_to_flush) {
+ live_methods->sort(sort_nmethod_by_hotness);
+ // Iterate over the sorted array and speculatively disconnect the coldest nmethods
+ for (int i = 0; i < live_methods->length(); i++) {
+ nm = live_methods->at(i);
+ if (methods_will_be_flushed < methods_to_flush) {
+ // Method was not previously disconnected
+ if ((nm->method()->code() == nm)) {
+ CodeCache::speculatively_disconnect(nm);
+ disconnected++;
+ methods_will_be_flushed++;
+ }
+ } else {
+ // The requested number of nmethods is scheduled for flushing
+ break;
}
}
}
- nm = CodeCache::alive_nmethod(CodeCache::next(nm));
}
- // remember how many compile_ids wheren't seen last flush.
- _dead_compile_ids = curr_max_comp_id - nmethod_count;
-
log_sweep("stop_cleaning",
"disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
disconnected, made_not_entrant);
// Shut off compiler. Sweeper will start over with a new stack scan and