--- old/src/share/vm/code/nmethod.cpp	2013-09-04 12:24:47.383238619 +0200
+++ new/src/share/vm/code/nmethod.cpp	2013-09-04 12:24:47.327238621 +0200
@@ -686,6 +686,7 @@
   _osr_entry_point = NULL;
   _exception_cache = NULL;
   _pc_desc_cache.reset_to(NULL);
+  _hotness_counter = (ReservedCodeCacheSize / M) * 2;
 
   code_buffer->copy_values_to(this);
   if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
@@ -770,6 +771,7 @@
   _osr_entry_point = NULL;
   _exception_cache = NULL;
   _pc_desc_cache.reset_to(NULL);
+  _hotness_counter = (ReservedCodeCacheSize / M) * 2;
 
   code_buffer->copy_values_to(this);
   debug_only(verify_scavenge_root_oops());
@@ -842,6 +844,7 @@
     _comp_level = comp_level;
     _compiler = compiler;
     _orig_pc_offset = orig_pc_offset;
+    _hotness_counter = (ReservedCodeCacheSize / M) * 2;
 
     // Section offsets
     _consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts());
@@ -1261,7 +1264,7 @@
   set_osr_link(NULL);
   //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
 
-  NMethodSweeper::notify(this);
+  NMethodSweeper::notify();
 }
 
 void nmethod::invalidate_osr_method() {
@@ -1416,7 +1419,7 @@
   }
 
   // Make sweeper aware that there is a zombie method that needs to be removed
-  NMethodSweeper::notify(this);
+  NMethodSweeper::notify();
 
   return true;
 }
--- old/src/share/vm/code/nmethod.hpp	2013-09-04 12:24:47.631238609 +0200
+++ new/src/share/vm/code/nmethod.hpp	2013-09-04 12:24:47.579238611 +0200
@@ -202,11 +202,18 @@
   // not_entrant method removal. Each mark_sweep pass will update
   // this mark to current sweep invocation count if it is seen on the
-  // stack. An not_entrant method can be removed when there is no
+  // stack. A not_entrant method can be removed when there are no
   // more activations, i.e., when the _stack_traversal_mark is less than
   // current sweep traversal index.
   long _stack_traversal_mark;
 
+  // The _hotness_counter indicates the hotness of a method. The higher
+  // the value, the hotter the method. The hotness counter of an nmethod is
+  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
+  // is active during stack scanning (mark_active_nmethods()). The hotness
+  // counter is decremented (by 1) during sweeping.
+  int _hotness_counter;
+
   ExceptionCache *_exception_cache;
   PcDescCache     _pc_desc_cache;
 
@@ -382,6 +389,11 @@
 
   int total_size () const;
 
+  // Hotness counter accessor methods
+  void dec_hotness_counter(int val) { _hotness_counter -= val; }
+  void set_hotness_counter(int val) { _hotness_counter  = val; }
+  int  get_hotness_counter() const  { return _hotness_counter; }
+
   // Containment
   bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
   bool insts_contains  (address addr) const { return insts_begin  () <= addr && addr < insts_end  (); }
--- old/src/share/vm/runtime/arguments.cpp	2013-09-04 12:24:47.863238601 +0200
+++ new/src/share/vm/runtime/arguments.cpp	2013-09-04 12:24:47.807238603 +0200
@@ -1126,6 +1126,16 @@
     Tier3InvokeNotifyFreqLog = 0;
     Tier4InvocationThreshold = 0;
   }
+  if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
+    // Make sure that NmethodSweepFraction is between 1 and 16.
+    if (ReservedCodeCacheSize < (16 * M)) {
+      FLAG_SET_DEFAULT(NmethodSweepFraction, 1);
+    } else if (ReservedCodeCacheSize > (256 * M)) {
+      FLAG_SET_DEFAULT(NmethodSweepFraction, 16);
+    } else {
+      FLAG_SET_DEFAULT(NmethodSweepFraction, ReservedCodeCacheSize / (16 * M));
+    }
+  }
 }
 
 #if INCLUDE_ALL_GCS
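For reference, the default chosen above works out to one sweep fragment per 16 MB of reserved code cache, clamped to the range [1, 16]. A minimal standalone sketch of that policy (illustrative only, not HotSpot code; all names here are made up):

// Sketch of the NmethodSweepFraction default: one fragment per 16 MB,
// clamped to [1, 16].
#include <cstddef>
#include <cstdio>

static const size_t M = 1024 * 1024;

static size_t default_sweep_fraction(size_t reserved_code_cache_size) {
  if (reserved_code_cache_size < 16 * M) {
    return 1;
  } else if (reserved_code_cache_size > 256 * M) {
    return 16;
  }
  return reserved_code_cache_size / (16 * M);
}

int main() {
  const size_t sizes_in_mb[] = { 8, 16, 64, 240, 256, 512 };
  for (size_t i = 0; i < sizeof(sizes_in_mb) / sizeof(sizes_in_mb[0]); i++) {
    printf("%3zu MB -> NmethodSweepFraction %zu\n",
           sizes_in_mb[i], default_sweep_fraction(sizes_in_mb[i] * M));
  }
  return 0;  // prints 1, 1, 4, 15, 16, 16
}

A larger fraction means each sweep pass visits a smaller slice of the cache, which keeps individual sweep pauses short on large code caches.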
--- old/src/share/vm/runtime/globals.hpp	2013-09-04 12:24:48.079238592 +0200
+++ new/src/share/vm/runtime/globals.hpp	2013-09-04 12:24:48.027238594 +0200
@@ -2826,6 +2826,9 @@
   product(intx, NmethodSweepCheckInterval, 5,                               \
           "Compilers wake up every n seconds to possibly sweep nmethods")   \
                                                                             \
+  product(intx, NmethodSweepActivity, 10,                                   \
+          "Higher values result in more aggressive sweeping")               \
+                                                                            \
   notproduct(bool, LogSweeper, false,                                       \
           "Keep a ring buffer of sweeper activity")                         \
                                                                             \
@@ -3203,8 +3206,8 @@
   product(uintx, CodeCacheFlushingMinimumFreeSpace, 1500*K,                 \
           "When less than X space left, start code cache cleaning")         \
                                                                             \
-  product(uintx, CodeCacheFlushingFraction, 2,                              \
-          "Fraction of the code cache that is flushed when full")           \
+  product(uintx, CodeCacheFlushingMinimumPercentage, 0,                     \
+          "Minimum percentage of the code cache that will be flushed")      \
                                                                             \
   /* interpreter debugging */                                               \
   develop(intx, BinarySwitchThreshold, 5,                                   \
--- old/src/share/vm/runtime/safepoint.cpp	2013-09-04 12:24:48.339238583 +0200
+++ new/src/share/vm/runtime/safepoint.cpp	2013-09-04 12:24:48.271238585 +0200
@@ -519,8 +519,8 @@
   }
 
   {
-    TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
-    NMethodSweeper::scan_stacks();
+    TraceTime t4("mark nmethods", TraceSafepointCleanupTime);
+    NMethodSweeper::mark_active_nmethods();
   }
 
   if (SymbolTable::needs_rehashing()) {
--- old/src/share/vm/runtime/sweeper.cpp	2013-09-04 12:24:48.559238574 +0200
+++ new/src/share/vm/runtime/sweeper.cpp	2013-09-04 12:24:48.487238577 +0200
@@ -140,11 +140,9 @@
 jint      NMethodSweeper::_locked_seen = 0;
 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
-bool      NMethodSweeper::_resweep = false;
+bool      NMethodSweeper::_request_mark_phase = false;
 jint      NMethodSweeper::_flush_token = 0;
 jlong     NMethodSweeper::_last_full_flush_time = 0;
-int       NMethodSweeper::_highest_marked = 0;
-int       NMethodSweeper::_dead_compile_ids = 0;
 long      NMethodSweeper::_last_flush_traversal_id = 0;
 
 int       NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache
@@ -155,36 +153,66 @@
 jlong     NMethodSweeper::_peak_sweep_fraction_time = 0;
 jlong     NMethodSweeper::_total_disconnect_time = 0;
 jlong     NMethodSweeper::_peak_disconnect_time = 0;
+int       NMethodSweeper::_hotness_counter_reset_val = 0;
+
+enum { hotness_counter_decay = 1 };
 
 class MarkActivationClosure: public CodeBlobClosure {
 public:
   virtual void do_code_blob(CodeBlob* cb) {
-    // If we see an activation belonging to a non_entrant nmethod, we mark it.
-    if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
-      ((nmethod*)cb)->mark_as_seen_on_stack();
+    if (cb->is_nmethod()) {
+      nmethod* nm = (nmethod*)cb;
+      nm->set_hotness_counter(NMethodSweeper::get_hotness_counter_reset_val());
+      // If we see an activation belonging to a non_entrant nmethod, we mark it.
+      if (nm->is_not_entrant()) {
+        nm->mark_as_seen_on_stack();
+      }
     }
   }
 };
 static MarkActivationClosure mark_activation_closure;
 
+class SetHotnessClosure: public CodeBlobClosure {
+public:
+  virtual void do_code_blob(CodeBlob* cb) {
+    if (cb->is_nmethod()) {
+      nmethod* nm = (nmethod*)cb;
+      nm->set_hotness_counter(NMethodSweeper::get_hotness_counter_reset_val());
+    }
+  }
+};
+static SetHotnessClosure set_hotness_closure;
+
+int NMethodSweeper::get_hotness_counter_reset_val() {
+  if (_hotness_counter_reset_val == 0) {
+    _hotness_counter_reset_val = (ReservedCodeCacheSize / M) * 2;
+  }
+  return _hotness_counter_reset_val;
+}
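The reset value scales with the code cache size, so larger caches let methods stay "warm" for more sweep cycles before becoming eviction candidates. A small standalone sketch of the arithmetic (illustrative only, not HotSpot code; the 48 MB size is an arbitrary example):

// How long a completely idle nmethod keeps a positive hotness counter,
// assuming the decay of 1 per sweep used above.
#include <cstdio>

int main() {
  const int M = 1024 * 1024;
  const int reserved_code_cache_size = 48 * M;               // e.g. -XX:ReservedCodeCacheSize=48m
  const int reset_val = (reserved_code_cache_size / M) * 2;  // 96

  int hotness = reset_val;
  int sweeps = 0;
  while (hotness > 0) {   // dec_hotness_counter(1) per sweep, no stack activations
    hotness -= 1;
    sweeps++;
  }
  printf("reset_val=%d, counter reaches 0 after %d sweeps\n", reset_val, sweeps);
  return 0;
}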
 
 bool NMethodSweeper::sweep_in_progress() {
   return (_current != NULL);
 }
 
-void NMethodSweeper::scan_stacks() {
+// Scans the stacks of all Java threads and marks activations of not-entrant methods.
+// No need to synchronize access, since 'mark_active_nmethods' is always executed at a
+// safepoint.
+void NMethodSweeper::mark_active_nmethods() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
-  if (!MethodFlushing) return;
-
-  // No need to synchronize access, since this is always executed at a
-  // safepoint.
+  // If we do not want to reclaim not-entrant or zombie methods, there is no need
+  // to scan stacks.
+  if (!MethodFlushing) {
+    return;
+  }
 
   // Make sure CompiledIC_lock in unlocked, since we might update some
   // inline caches. If it is, we just bail-out and try later.
   if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
 
+  // Check for restart
   assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
-  if (!sweep_in_progress() && _resweep) {
+  if (!sweep_in_progress() && need_marking_phase()) {
     _seen        = 0;
     _invocations = NmethodSweepFraction;
     _current     = CodeCache::first_nmethod();
@@ -197,30 +225,30 @@
     Threads::nmethods_do(&mark_activation_closure);
 
     // reset the flags since we started a scan from the beginning.
-    _resweep = false;
+    reset_nmethod_marking();
     _locked_seen = 0;
     _not_entrant_seen_on_stack = 0;
+  // Only set hotness counter
+  } else {
+    Threads::nmethods_do(&set_hotness_closure);
   }
 
   if (UseCodeCacheFlushing) {
-    // only allow new flushes after the interval is complete.
+    // Only allow new flushes after the interval is complete.
     jlong now           = os::javaTimeMillis();
     jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
     jlong curr_interval = now - _last_full_flush_time;
     if (curr_interval > max_interval) {
       _flush_token = 0;
     }
-
-    if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
-      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
-      log_sweep("restart_compiler");
-    }
   }
 }
 
 void NMethodSweeper::possibly_sweep() {
   assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
-  if (!MethodFlushing || !sweep_in_progress()) return;
+  if (!MethodFlushing || !sweep_in_progress()) {
+    return;
+  }
 
   if (_invocations > 0) {
     // Only one thread at a time will sweep
@@ -258,8 +286,7 @@
     if (!CompileBroker::should_compile_new_jobs()) {
       // If we have turned off compilations we might as well do full sweeps
       // in order to reach the clean state faster. Otherwise the sleeping compiler
-      // threads will slow down sweeping. After a few iterations the cache
-      // will be clean and sweeping stops (_resweep will not be set)
+      // threads will slow down sweeping.
       _invocations = 1;
     }
 
@@ -269,7 +296,6 @@
     // the number of nmethods changes during the sweep so the final
     // stage must iterate until it there are no more nmethods.
     int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
-
     assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
     assert(!CodeCache_lock->owned_by_self(), "just checking");
 
@@ -306,11 +332,11 @@
 
   assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
 
-  if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
+  if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
     // we've completed a scan without making progress but there were
     // nmethods we were unable to process either because they were
-    // locked or were still on stack. We don't have to aggresively
-    // clean them up so just stop scanning. We could scan once more
+    // locked or were still on stack. We don't have to aggressively
+    // clean them up so just stop scanning. We could scan once more
     // but that complicates the control logic and it's unlikely to
     // matter much.
     if (PrintMethodFlushing) {
@@ -365,8 +391,8 @@
     _thread = CompilerThread::current();
     if (!nm->is_zombie() && !nm->is_unloaded()) {
       // Only expose live nmethods for scanning
-    _thread->set_scanned_nmethod(nm);
-  }
+      _thread->set_scanned_nmethod(nm);
+    }
   }
   ~NMethodMarker() {
     _thread->set_scanned_nmethod(NULL);
   }
@@ -392,18 +418,16 @@
 
 void NMethodSweeper::process_nmethod(nmethod *nm) {
   assert(!CodeCache_lock->owned_by_self(), "just checking");
 
-  // Make sure this nmethod doesn't get unloaded during the scan,
-  // since the locks acquired below might safepoint.
+  // Make sure this nmethod does not get unloaded during the scan, since the locks acquired below might safepoint.
   NMethodMarker nmm(nm);
-
   SWEEP(nm);
 
   // Skip methods that are currently referenced by the VM
   if (nm->is_locked_by_vm()) {
     // But still remember to clean-up inline caches for alive nmethods
     if (nm->is_alive()) {
-      // Clean-up all inline caches that points to zombie/non-reentrant methods
+      // Clean-up all inline caches that point to zombie/non-reentrant methods
      MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
       SWEEP(nm);
     }
     return;
   }
 
   if (nm->is_zombie()) {
-    // If it is first time, we see nmethod then we mark it. Otherwise,
-    // we reclame it. When we have seen a zombie method twice, we know that
+    // If it is the first time we see this nmethod, then we mark it. Otherwise,
+    // we reclaim it. When we have seen a zombie method twice, we know that
     // there are no inline caches that refer to it.
     if (nm->is_marked_for_reclamation()) {
       assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
@@ -430,19 +454,19 @@
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
       }
       nm->mark_for_reclamation();
-      _resweep = true;
+      request_nmethod_marking();
       _marked_count++;
       SWEEP(nm);
     }
   } else if (nm->is_not_entrant()) {
-    // If there is no current activations of this method on the
+    // If there are no current activations of this method on the
     // stack we can safely convert it to a zombie method
     if (nm->can_not_entrant_be_converted()) {
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
       }
       nm->make_zombie();
-      _resweep = true;
+      request_nmethod_marking();
       _zombified_count++;
       SWEEP(nm);
     } else {
@@ -467,22 +491,41 @@
       _flushed_count++;
     } else {
       nm->make_zombie();
-      _resweep = true;
+      request_nmethod_marking();
       _zombified_count++;
       SWEEP(nm);
     }
   } else {
-    assert(nm->is_alive(), "should be alive");
-
     if (UseCodeCacheFlushing) {
-      if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
-          (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
-        // This method has not been called since the forced cleanup happened
-        nm->make_not_entrant();
+      if (!nm->is_locked_by_vm() && !nm->is_osr_method()) {
+        // Do not make native methods and OSR-methods not-entrant
+        if (!nm->is_speculatively_disconnected() && !nm->is_native_method() && !nm->is_osr_method()) {
+          nm->dec_hotness_counter(hotness_counter_decay);
+          // This method is cold and the code cache fills up => get rid of it.
+          int reset_val = get_hotness_counter_reset_val();
+          int time_since_reset = reset_val - nm->get_hotness_counter();
+          double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
+          // A method is marked as not-entrant if the method is
+          //   1) 'old enough': nm->get_hotness_counter() < threshold
+          //   2) in use for a minimal amount of time: time_since_reset > 10
+          // The second condition is necessary if we are dealing with very small code cache
+          // sizes (e.g., < 10 MB) that are too small to hold all hot methods. It ensures
+          // that methods are not made not-entrant immediately after compilation.
+          if ((nm->get_hotness_counter() < threshold) && (time_since_reset > 10)) {
+            nm->make_not_entrant();
+            nm->set_hotness_counter(-reset_val);
+            request_nmethod_marking();
+          }
+        } else if (nm->is_speculatively_disconnected() && (_traversals > _last_flush_traversal_id + 2)) {
+          // This method has not been called since the forced cleanup happened
+          nm->make_not_entrant();
+          nm->set_hotness_counter(-get_hotness_counter_reset_val());
+          request_nmethod_marking();
+        }
       }
     }
-
-    // Clean-up all inline caches that points to zombie/non-reentrant methods
+    // Clean-up all inline caches that point to zombie/non-reentrant methods
     MutexLocker cl(CompiledIC_lock);
     nm->cleanup_inline_caches();
     SWEEP(nm);
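The threshold grows with cache pressure, so cold methods are retired sooner as the cache fills. A standalone model of this decision (illustrative only, not HotSpot code; it assumes CodeCache::reverse_free_ratio() behaves roughly like max_capacity / unallocated_capacity, i.e., it approaches 1 for an empty cache and grows large for a full one):

// How many sweeps an unused method survives before going not-entrant,
// for a 64 MB code cache (reset_val = 128) and NmethodSweepActivity = 10.
#include <cstdio>

static int sweeps_until_not_entrant(int reset_val, double reverse_free_ratio,
                                    int sweep_activity) {
  const double threshold = -reset_val + (reverse_free_ratio * sweep_activity);
  int hotness = reset_val;          // just reset by a stack scan
  int time_since_reset = 0;
  while (!(hotness < threshold && time_since_reset > 10)) {
    hotness -= 1;                   // hotness_counter_decay per sweep
    time_since_reset++;
  }
  return time_since_reset;
}

int main() {
  const int reset_val = 128;
  // Nearly empty cache (ratio ~1): a cold method survives for a long time (247 sweeps).
  printf("empty cache: %d sweeps\n", sweeps_until_not_entrant(reset_val, 1.0, 10));
  // Nearly full cache (ratio ~32): cold methods go not-entrant after just 11 sweeps,
  // gated only by the time_since_reset > 10 minimum.
  printf("full cache:  %d sweeps\n", sweeps_until_not_entrant(reset_val, 32.0, 10));
  return 0;
}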
@@ -491,13 +534,12 @@
 
 // Code cache unloading: when compilers notice the code cache is getting full,
 // they will call a vm op that comes here. This code attempts to speculatively
-// unload the oldest half of the nmethods (based on the compile job id) by
-// saving the old code in a list in the CodeCache. Then
-// execution resumes. If a method so marked is not called by the second sweeper
-// stack traversal after the current one, the nmethod will be marked non-entrant and
-// got rid of by normal sweeping. If the method is called, the Method*'s
-// _code field is restored and the Method*/nmethod
-// go back to their normal state.
+// unload the coldest part of the nmethods by saving the cold code in a list in
+// the CodeCache. Then execution resumes. If a method so marked is not called by
+// the second sweeper stack traversal after the current one, the nmethod will be
+// marked non-entrant and removed by normal sweeping. If the method is called,
+// the Method*'s _code field is restored and the Method*/nmethod go back to their
+// normal state.
 
 void NMethodSweeper::handle_full_code_cache(bool is_full) {
   if (is_full) {
@@ -508,7 +550,7 @@
   }
 
   // Make sure only one thread can flush
-  // The token is reset after CodeCacheMinimumFlushInterval in scan stacks,
+  // The token is reset after MinCodeCacheFlushingInterval in mark_active_nmethods(),
   // no need to check the timeout here.
   jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
   if (old != 0) {
@@ -518,64 +560,156 @@
 
   VM_HandleFullCodeCache op(is_full);
   VMThread::execute(&op);
 
-  // resweep again as soon as possible
-  _resweep = true;
+  // Do marking as soon as possible
+  request_nmethod_marking();
+}
+
+// Sorts nmethods by hotness in ascending order, i.e., coldest first. Note that
+// GrowableArray::sort() expects a qsort-style three-way comparator.
+int NMethodSweeper::sort_nmethods_by_hotness(nmethod** nm1, nmethod** nm2) {
+  return ((*nm1)->get_hotness_counter() - (*nm2)->get_hotness_counter());
+}
+
+// An NMethodBlock groups adjacent nmethods so that whole blocks can be flushed,
+// which should help to reduce code cache fragmentation.
+class NMethodBlock : public CHeapObj<mtCode> {
+ private:
+  GrowableArray<nmethod*>* _nmethods;
+  int    _block_size;
+  double _hotness;
+
+ public:
+  NMethodBlock() {
+    _nmethods   = new GrowableArray<nmethod*>();
+    _hotness    = 0;
+    _block_size = 0;
+  }
+
+  void append(nmethod* nm) {
+    _nmethods->append(nm);
+    _block_size += nm->total_size();
+  }
+
+  int get_length() const {
+    return _nmethods->length();
+  }
+
+  int get_size_in_bytes() const {
+    return _block_size;
+  }
+
+  nmethod* at(int i) const {
+    return _nmethods->at(i);
+  }
+
+  // Computes the size-weighted average hotness of an nmethod block
+  void compute_hotness() {
+    _hotness = 0;
+    if (_block_size > 0) {
+      for (int i = 0; i < _nmethods->length(); i++) {
+        nmethod* nm = _nmethods->at(i);
+        _hotness += nm->total_size() * nm->get_hotness_counter();
+      }
+      _hotness /= get_size_in_bytes();
+    }
+  }
+
+  double get_hotness() {
+    compute_hotness();
+    return _hotness;
+  }
+};
+
+// Sorts nmethod blocks by average hotness in ascending order, i.e., coldest block first
+static int sort_nmethod_blocks_by_hotness(NMethodBlock** b1, NMethodBlock** b2) {
+  double diff = (*b1)->get_hotness() - (*b2)->get_hotness();
+  if (diff < 0) return -1;
+  if (diff > 0) return  1;
+  return 0;
+}
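A block's hotness is the average of its methods' hotness counters, weighted by method size, so one large cold method can outweigh several small warm ones. A standalone worked example of the metric and the resulting coldest-first ordering (illustrative only, not HotSpot code; sizes and counters are invented):

// Size-weighted average hotness of a block of methods.
#include <cstdio>

static double avg_hotness(const int* sizes, const int* hotness, int n) {
  double weighted = 0;
  double total = 0;
  for (int i = 0; i < n; i++) {
    weighted += (double)sizes[i] * hotness[i];
    total += sizes[i];
  }
  return weighted / total;
}

int main() {
  const int sizes_a[] = { 4096, 12288 }, hot_a[] = { 100, -20 };
  const int sizes_b[] = { 8192,  8192 }, hot_b[] = { -50,  30 };
  double a = avg_hotness(sizes_a, hot_a, 2);  // (409600 - 245760) / 16384 =  10
  double b = avg_hotness(sizes_b, hot_b, 2);  // (-409600 + 245760) / 16384 = -10
  printf("block A: %.1f, block B: %.1f -> flush %s first\n",
         a, b, (b < a) ? "B" : "A");
  return 0;
}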
 
 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
   // If there was a race in detecting full code cache, only run
   // one vm op for it or keep the compiler shut off
   jlong disconnect_start_counter = os::elapsed_counter();
-
-  // Traverse the code cache trying to dump the oldest nmethods
-  int curr_max_comp_id = CompileBroker::get_compilation_id();
-  int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
-
-  log_sweep("start_cleaning");
-
-  nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
   jint disconnected = 0;
   jint made_not_entrant  = 0;
   jint nmethod_count = 0;
-  while ((nm != NULL)){
-    int curr_comp_id = nm->compile_id();
-
-    // OSR methods cannot be flushed like this. Also, don't flush native methods
-    // since they are part of the JDK in most cases
-    if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {
-
-      // only count methods that can be speculatively disconnected
-      nmethod_count++;
-
-      if (nm->is_in_use() && (curr_comp_id < flush_target)) {
-        if ((nm->method()->code() == nm)) {
-          // This method has not been previously considered for
-          // unloading or it was restored already
-          CodeCache::speculatively_disconnect(nm);
-          disconnected++;
-        } else if (nm->is_speculatively_disconnected()) {
-          // This method was previously considered for preemptive unloading and was not called since then
-          CompilationPolicy::policy()->delay_compilation(nm->method());
-          nm->make_not_entrant();
-          made_not_entrant++;
-        }
-
-        if (curr_comp_id > _highest_marked) {
-          _highest_marked = curr_comp_id;
-        }
-      }
-    }
-    nm = CodeCache::alive_nmethod(CodeCache::next(nm));
-  }
-
-  // remember how many compile_ids wheren't seen last flush.
-  _dead_compile_ids = curr_max_comp_id - nmethod_count;
+
+  log_sweep("start_cleaning");
+  {
+    ResourceMark rm;
+    nmethod* nm = CodeCache::first_nmethod();
+
+    // The intention behind the flushing fraction is that for smaller code cache
+    // sizes a larger fraction of the code cache is speculatively disconnected
+    // than for large code cache sizes. See the following examples:
+    //   CodeCacheSize [MB]    memory flushed [MB]
+    //   256                   40
+    //   128                   25
+    //    64                   16
+    //    32                   10
+    //    16                    6
+    //     8                    4
+    //
+    // In addition, it is possible to increase the amount of memory that is flushed
+    // by using 'CodeCacheFlushingMinimumPercentage'.
+    const double flushing_fraction = pow((double)(ReservedCodeCacheSize / M), -1.0 / 3.0) +
+                                     CodeCacheFlushingMinimumPercentage / 100.0;
+    const int memory_to_flush = (int)(ReservedCodeCacheSize * flushing_fraction);
+    int memory_will_be_flushed = 0;
+    // Put methods that are candidates for flushing into an nmethod block.
+    // Flushing whole blocks should help to reduce code cache fragmentation.
+    GrowableArray<NMethodBlock*>* nmethod_blocks = new GrowableArray<NMethodBlock*>();
+    NMethodBlock* nm_block = new NMethodBlock();
+    nmethod_blocks->append(nm_block);
+    const int nmethod_block_size = 1 * M;
+
+    // See how many methods are already in the process of being flushed
+    while (nm != NULL) {
+      // OSR methods cannot be flushed like this. Also, don't flush native methods
+      // since they are part of the JDK in most cases.
+      if (nm->is_in_use()) {
+        if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
+          nm_block->append(nm);
+          // This method was previously considered for preemptive unloading and was not called since then
+          if (nm->is_speculatively_disconnected()) {
+            CompilationPolicy::policy()->delay_compilation(nm->method());
+            nm->make_not_entrant();
+            made_not_entrant++;
+            memory_will_be_flushed += nm->total_size();
+          }
+        }
+      // These checks ensure that we only add nmethods that can be removed from the code cache
+      } else if (nm->is_not_entrant() || nm->is_zombie() || nm->is_unloaded()) {
+        memory_will_be_flushed += nm->total_size();
+        nm_block->append(nm);
+      }
+
+      // Start a new block once the current one is large enough
+      if (nm_block->get_size_in_bytes() > nmethod_block_size) {
+        nm_block = new NMethodBlock();
+        nmethod_blocks->append(nm_block);
+      }
+      nm = CodeCache::next_nmethod(nm);
+    }
+
+    // Speculatively disconnect nmethod blocks until we reach 'memory_to_flush'
+    if (memory_will_be_flushed < memory_to_flush) {
+      nmethod_blocks->sort(sort_nmethod_blocks_by_hotness);
+      // Iterate over the sorted array (coldest blocks first) and speculatively
+      // disconnect these nmethods
+      for (int block_idx = 0; block_idx < nmethod_blocks->length(); block_idx++) {
+        nm_block = nmethod_blocks->at(block_idx);
+        for (int nmethod_idx = 0; nmethod_idx < nm_block->get_length(); nmethod_idx++) {
+          nm = nm_block->at(nmethod_idx);
+          if ((nm->is_in_use()) && (nm->method()->code() == nm)) {
+            CodeCache::speculatively_disconnect(nm);
+            disconnected++;
+          }
+        }
+        memory_will_be_flushed += nm_block->get_size_in_bytes();
+        // Stop flushing
+        if (memory_will_be_flushed >= memory_to_flush) {
+          break;
+        }
+      }
+    }
+  } // End ResourceMark
 
   log_sweep("stop_cleaning",
-                       "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
-                       disconnected, made_not_entrant);
+            "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
+            disconnected, made_not_entrant);
 
   // Shut off compiler. Sweeper will start over with a new stack scan and
   // traversal cycle and turn it back on if it clears enough space.
@@ -600,7 +734,7 @@
 
   // After two more traversals the sweeper will get rid of unrestored nmethods
   _last_flush_traversal_id = _traversals;
-  _resweep = true;
+  request_nmethod_marking();
 
 #ifdef ASSERT
   if(PrintMethodFlushing && Verbose) {
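The flushed amounts in the table above follow directly from fraction = (size in MB)^(-1/3), plus the optional CodeCacheFlushingMinimumPercentage term. A quick standalone check of the table (illustrative only, not HotSpot code):

// Reproduces the flushing-fraction table for the default minimum percentage of 0.
#include <cmath>
#include <cstdio>

int main() {
  const int sizes_in_mb[] = { 256, 128, 64, 32, 16, 8 };
  const double minimum_percentage = 0.0;   // CodeCacheFlushingMinimumPercentage
  for (int i = 0; i < 6; i++) {
    double fraction = pow((double)sizes_in_mb[i], -1.0 / 3.0) + minimum_percentage / 100.0;
    printf("%3d MB -> flush %5.1f MB (fraction %.3f)\n",
           sizes_in_mb[i], sizes_in_mb[i] * fraction, fraction);
  }
  return 0;  // prints ~40.3, 25.4, 16.0, 10.1, 6.3, 4.0 MB
}

Note that both pow() arguments must be floating point: with integer operands, -1/3 evaluates to 0 and the fraction degenerates to a constant.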
The +// selected nmethods are 'speculatively disconnected, i.e., the methods are added to a +// linked list AND the method's code pointer is set to null (nm->method()->code() = NULL) +// If the compiler tries to compile the disconnected nmethods (the compiler has to, since +// method()->code() == NULL), the compiler first searches the linked list where speculatively +// disconnected nmethods are stored. If the compiler finds the nmethod, the compiler 're-animates' +// the nmethod (restores the code pointer). If not, the method is removed by normal sweeping. class NMethodSweeper : public AllStatic { static long _traversals; // Stack scan count, also sweep ID. @@ -41,21 +67,17 @@ static volatile int _invocations; // No. of invocations left until we are completed with this pass static volatile int _sweep_started; // Flag to control conc sweeper - //The following are reset in scan_stacks and synchronized by the safepoint - static bool _resweep; // Indicates that a change has happend and we want another sweep, - // always checked and reset at a safepoint so memory will be in sync. - static int _locked_seen; // Number of locked nmethods encountered during the scan + //The following are reset in mark_active_nmethods and synchronized by the safepoint + static bool _request_mark_phase; // Indicates that a change has happend and we need another mark pahse, + // always checked and reset at a safepoint so memory will be in sync. + static int _locked_seen; // Number of locked nmethods encountered during the scan static int _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack - static jint _flush_token; // token that guards method flushing, making sure it is executed only once. + static jint _flush_token; // token that guards method flushing, making sure it is executed only once. 
 
 class NMethodSweeper : public AllStatic {
   static long      _traversals;   // Stack scan count, also sweep ID.
@@ -41,21 +67,17 @@
   static volatile int _invocations;   // No. of invocations left until we are completed with this pass
   static volatile int _sweep_started; // Flag to control conc sweeper
 
-  //The following are reset in scan_stacks and synchronized by the safepoint
-  static bool      _resweep;        // Indicates that a change has happend and we want another sweep,
-                                    // always checked and reset at a safepoint so memory will be in sync.
-  static int       _locked_seen;    // Number of locked nmethods encountered during the scan
+  // The following are reset in mark_active_nmethods and synchronized by the safepoint
+  static bool      _request_mark_phase;  // Indicates that a change has happened and we need another mark phase,
+                                         // always checked and reset at a safepoint so memory will be in sync.
+  static int       _locked_seen;         // Number of locked nmethods encountered during the scan
   static int       _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack
 
-  static jint      _flush_token;    // token that guards method flushing, making sure it is executed only once.
+  static jint      _flush_token;         // Token that guards method flushing, making sure it is executed only once
 
   // These are set during a flush, a VM-operation
   static long      _last_flush_traversal_id; // trav number at last flush unloading
   static jlong     _last_full_flush_time;    // timestamp of last emergency unloading
 
-  // These are synchronized by the _sweep_started token
-  static int       _highest_marked;   // highest compile id dumped at last emergency unloading
-  static int       _dead_compile_ids; // number of compile ids that where not in the cache last flush
-
   // Stat counters
   static int       _number_of_flushes;            // Total of full traversals caused by full cache
   static int       _total_nof_methods_reclaimed;  // Accumulated nof methods flushed
@@ -71,6 +93,12 @@
   static void log_sweep(const char* msg, const char* format = NULL, ...);
   static bool sweep_in_progress();
+  static void sweep_code_cache();
+  static void request_nmethod_marking() { _request_mark_phase = true; }
+  static void reset_nmethod_marking()   { _request_mark_phase = false; }
+  static bool need_marking_phase()      { return _request_mark_phase; }
+
+  static int _hotness_counter_reset_val;
 
  public:
   static long traversal_count() { return _traversals; }
@@ -90,15 +118,17 @@
   static void report_events();
 #endif
 
-  static void scan_stacks();      // Invoked at the end of each safepoint
-  static void sweep_code_cache(); // Concurrent part of sweep job
-  static void possibly_sweep();   // Compiler threads call this to sweep
+  static void mark_active_nmethods();  // Invoked at the end of each safepoint
+  static void possibly_sweep();        // Compiler threads call this to sweep
+
+  static int sort_nmethods_by_hotness(nmethod** nm1, nmethod** nm2);
+  static int get_hotness_counter_reset_val();
 
-  static void notify(nmethod* nm) {
+  static void notify() {
     // Request a new sweep of the code cache from the beginning. No
     // need to synchronize the setting of this flag since it only
     // changes to false at safepoint so we can never overwrite it with false.
-    _resweep = true;
+    request_nmethod_marking();
   }
 
   static void handle_full_code_cache(bool is_full);  // Called by compilers who fail to allocate
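Putting the flushing pieces together, block selection is a simple greedy pass: visit blocks coldest-first and disconnect whole blocks until the flushing target is reached. A closing standalone sketch (illustrative only, not HotSpot code; sizes, hotness values, and the 3 MB target are invented):

#include <cstdio>

struct BlockInfo { double avg_hotness; int size_in_bytes; };

int main() {
  const int M = 1024 * 1024;
  // Blocks already sorted by ascending average hotness (coldest first).
  BlockInfo blocks[] = {
    { -80.0, 1 * M }, { -10.0, 2 * M }, { 5.0, 1 * M }, { 60.0, 3 * M }
  };
  const int memory_to_flush = 3 * M;   // flushing target derived from the cache size
  int memory_will_be_flushed = 0;
  for (int i = 0; i < 4 && memory_will_be_flushed < memory_to_flush; i++) {
    memory_will_be_flushed += blocks[i].size_in_bytes;   // disconnect the whole block
    printf("flush block %d (avg hotness %.1f)\n", i, blocks[i].avg_hotness);
  }
  // Flushes blocks 0 and 1 (3 MB total); the hot blocks survive.
  return 0;
}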