--- old/src/share/vm/ci/ciEnv.cpp	2010-01-13 16:08:15.000000000 -0800
+++ new/src/share/vm/ci/ciEnv.cpp	2010-01-13 16:08:15.000000000 -0800
@@ -938,18 +938,10 @@
   if (nm == NULL) {
     // The CodeCache is full.  Print out warning and disable compilation.
     record_failure("code cache is full");
-    UseInterpreter = true;
-    if (UseCompiler || AlwaysCompileLoopMethods ) {
-#ifndef PRODUCT
-      warning("CodeCache is full. Compiler has been disabled");
-      if (CompileTheWorld || ExitOnFullCodeCache) {
-        before_exit(JavaThread::current());
-        exit_globals(); // will delete tty
-        vm_direct_exit(CompileTheWorld ? 0 : 1);
-      }
-#endif
-      UseCompiler = false;
-      AlwaysCompileLoopMethods = false;
+    {
+      MutexUnlocker ml(Compile_lock);
+      MutexUnlocker locker(MethodCompileQueue_lock);
+      CompileBroker::handle_full_code_cache();
     }
   } else {
     NOT_PRODUCT(nm->set_has_debug_info(has_debug_info); )
--- old/src/share/vm/code/nmethod.cpp	2010-01-13 16:08:16.000000000 -0800
+++ new/src/share/vm/code/nmethod.cpp	2010-01-13 16:08:16.000000000 -0800
@@ -1034,7 +1034,7 @@
       if( cb != NULL && cb->is_nmethod() ) {
         nmethod* nm = (nmethod*)cb;
         // Clean inline caches pointing to both zombie and not_entrant methods
-        if (!nm->is_in_use()) ic->set_to_clean();
+        if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
       }
       break;
     }
@@ -1044,7 +1044,7 @@
       if( cb != NULL && cb->is_nmethod() ) {
         nmethod* nm = (nmethod*)cb;
         // Clean inline caches pointing to both zombie and not_entrant methods
-        if (!nm->is_in_use()) csc->set_to_clean();
+        if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
       }
       break;
     }
@@ -1303,7 +1303,8 @@
   // completely deallocate this method
   EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
   if (PrintMethodFlushing) {
-    tty->print_cr("*flushing nmethod " INTPTR_FORMAT ". Live blobs: %d", this, CodeCache::nof_blobs());
+    tty->print_cr("*flushing nmethod " INTPTR_FORMAT ". " UINT32_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
+                  this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity(), CodeCache::max_capacity());
   }
 
   // We need to deallocate any ExceptionCache data.
--- old/src/share/vm/compiler/compileBroker.cpp	2010-01-13 16:08:17.000000000 -0800
+++ new/src/share/vm/compiler/compileBroker.cpp	2010-01-13 16:08:17.000000000 -0800
@@ -69,6 +69,7 @@
 bool CompileBroker::_initialized = false;
 volatile bool CompileBroker::_should_block = false;
+volatile jint CompileBroker::_should_compile_new_jobs = run_compilation;
 
 // The installed compiler(s)
 AbstractCompiler* CompileBroker::_compilers[2];
@@ -461,9 +462,9 @@
 // Get the next CompileTask from a CompileQueue
 CompileTask* CompileQueue::get() {
   MutexLocker locker(lock());
-
+
   // Wait for an available CompileTask.
-  while (_first == NULL) {
+  while ((_first == NULL) || (!CompileBroker::should_compile_new_jobs())) {
     // There is no work to be done right now.  Wait.
     lock()->wait();
   }
@@ -1325,27 +1326,14 @@
   {
   // We need this HandleMark to avoid leaking VM handles.
   HandleMark hm(thread);
+
   if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
-    // The CodeCache is full.  Print out warning and disable compilation.
-    UseInterpreter = true;
-    if (UseCompiler || AlwaysCompileLoopMethods ) {
-      if (log != NULL) {
-        log->begin_elem("code_cache_full");
-        log->stamp();
-        log->end_elem();
-      }
-#ifndef PRODUCT
-      warning("CodeCache is full. Compiler has been disabled");
-      if (CompileTheWorld || ExitOnFullCodeCache) {
-        before_exit(thread);
-        exit_globals(); // will delete tty
-        vm_direct_exit(CompileTheWorld ? 0 : 1);
-      }
-#endif
-      UseCompiler = false;
-      AlwaysCompileLoopMethods = false;
-    }
-  }
+    // the code cache is really full
+    handle_full_code_cache();
+  } else if (UseCodeCacheFlushing && (CodeCache::unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace)) {
+    // Attempt to start cleaning the code cache while there is still a little headroom
+    NMethodSweeper::handle_full_code_cache(false);
+  }
 
   CompileTask* task = queue->get();
@@ -1369,7 +1357,7 @@
     // Never compile a method if breakpoints are present in it
     if (method()->number_of_breakpoints() == 0) {
       // Compile the method.
-      if (UseCompiler || AlwaysCompileLoopMethods) {
+      if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
 #ifdef COMPILER1
         // Allow repeating compilations for the purpose of benchmarking
         // compile speed. This is not useful for customers.
@@ -1614,6 +1602,31 @@
 
 // ------------------------------------------------------------------
+// CompileBroker::handle_full_code_cache
+//
+// The CodeCache is full. Print out warning and disable compilation or
+// try code cache cleaning so compilation can continue later.
+void CompileBroker::handle_full_code_cache() {
+  UseInterpreter = true;
+  if (UseCompiler || AlwaysCompileLoopMethods ) {
+  #ifndef PRODUCT
+    warning("CodeCache is full. Compiler has been disabled");
+    if (CompileTheWorld || ExitOnFullCodeCache) {
+      before_exit(JavaThread::current());
+      exit_globals(); // will delete tty
+      vm_direct_exit(CompileTheWorld ? 0 : 1);
+    }
+  #endif
+    if (UseCodeCacheFlushing) {
+      NMethodSweeper::handle_full_code_cache(true);
+    } else {
+      UseCompiler = false;
+      AlwaysCompileLoopMethods = false;
+    }
+  }
+}
+
+// ------------------------------------------------------------------
 // CompileBroker::set_last_compile
 //
 // Record this compilation for debugging purposes.
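
The ciEnv path above releases Compile_lock and MethodCompileQueue_lock before delegating to CompileBroker::handle_full_code_cache(), apparently because the broker may hand the failure to the sweeper, which enqueues a VM operation (see the sweeper changes later in the patch); that must not happen with compiler locks held. A minimal standalone sketch of the scoped-unlock idiom that MutexUnlocker provides, with std::mutex standing in for HotSpot's Monitor (names here are illustrative, not HotSpot's implementation):

    #include <mutex>

    // Inverse of std::lock_guard: releases an already-held mutex for the
    // current scope and re-acquires it when the scope exits.
    class ScopedUnlocker {
     public:
      explicit ScopedUnlocker(std::mutex& m) : _m(m) { _m.unlock(); }
      ~ScopedUnlocker() { _m.lock(); }
     private:
      std::mutex& _m;
    };

    std::mutex compile_lock;

    void report_full_cache() { /* may block, take other locks, etc. */ }

    void on_allocation_failure() {   // must be called with compile_lock held
      ScopedUnlocker ul(compile_lock);
      report_full_cache();           // runs without compile_lock
    }                                // compile_lock re-acquired here
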
--- old/src/share/vm/compiler/compileBroker.hpp	2010-01-13 16:08:19.000000000 -0800
+++ new/src/share/vm/compiler/compileBroker.hpp	2010-01-13 16:08:18.000000000 -0800
@@ -192,6 +192,9 @@
  private:
   static bool _initialized;
   static volatile bool _should_block;
+
+  // This flag can be used to stop compilation or turn it back on
+  static volatile jint _should_compile_new_jobs;
 
   // The installed compiler(s)
   static AbstractCompiler* _compilers[2];
@@ -319,6 +322,7 @@
 
   static void compiler_thread_loop();
 
+  static uint get_compilation_id() { return _compilation_id; }
   static bool is_idle();
 
   // Set _should_block.
@@ -327,7 +331,21 @@
   // Call this from the compiler at convenient points, to poll for _should_block.
   static void maybe_block();
-
+
+  enum {
+    // Flags for toggling compiler activity
+    stop_compilation = 0,
+    run_compilation  = 1
+  };
+
+  static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
+  static bool set_should_compile_new_jobs(jint new_state) {
+    // Return success if the current caller set it
+    jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
+    return (old == (1-new_state));
+  }
+  static void handle_full_code_cache();
+
   // Return total compilation ticks
   static jlong total_compilation_ticks() {
     return _perf_total_compilation != NULL ? _perf_total_compilation->get_value() : 0;
   }
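
set_should_compile_new_jobs() above succeeds only for the single caller whose compare-and-swap actually performs the 0/1 transition; every other caller sees false and backs off. A standalone restatement of that idiom using std::atomic in place of HotSpot's Atomic::cmpxchg (a sketch for illustration, not VM code):

    #include <atomic>
    #include <cstdio>

    enum { stop_compilation = 0, run_compilation = 1 };
    std::atomic<int> should_compile_new_jobs{run_compilation};

    // True only for the caller that flipped the state from (1 - new_state).
    bool set_should_compile_new_jobs(int new_state) {
      int expected = 1 - new_state;
      return should_compile_new_jobs.compare_exchange_strong(expected, new_state);
    }

    int main() {
      printf("%d\n", set_should_compile_new_jobs(stop_compilation)); // 1: we stopped it
      printf("%d\n", set_should_compile_new_jobs(stop_compilation)); // 0: already stopped
      printf("%d\n", set_should_compile_new_jobs(run_compilation));  // 1: we restarted it
    }
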
--- old/src/share/vm/includeDB_core	2010-01-13 16:08:20.000000000 -0800
+++ new/src/share/vm/includeDB_core	2010-01-13 16:08:19.000000000 -0800
@@ -1092,6 +1092,7 @@
 compileBroker.cpp                       oop.inline.hpp
 compileBroker.cpp                       os.hpp
 compileBroker.cpp                       sharedRuntime.hpp
+compileBroker.cpp                       sweeper.hpp
 compileBroker.cpp                       systemDictionary.hpp
 compileBroker.cpp                       vmSymbols.hpp
@@ -3682,6 +3683,7 @@
 sharedRuntime.cpp                       abstractCompiler.hpp
 sharedRuntime.cpp                       arguments.hpp
 sharedRuntime.cpp                       biasedLocking.hpp
+sharedRuntime.cpp                       compileBroker.hpp
 sharedRuntime.cpp                       compiledIC.hpp
 sharedRuntime.cpp                       compilerOracle.hpp
 sharedRuntime.cpp                       copy.hpp
@@ -3934,6 +3936,7 @@
 sweeper.cpp                             atomic.hpp
 sweeper.cpp                             codeCache.hpp
+sweeper.cpp                             compileBroker.hpp
 sweeper.cpp                             events.hpp
 sweeper.cpp                             methodOop.hpp
 sweeper.cpp                             mutexLocker.hpp
@@ -3941,6 +3944,7 @@
 sweeper.cpp                             os.hpp
 sweeper.cpp                             resourceArea.hpp
 sweeper.cpp                             sweeper.hpp
+sweeper.cpp                             vm_operations.hpp
 
 symbolKlass.cpp                         gcLocker.hpp
 symbolKlass.cpp                         handles.inline.hpp
@@ -4594,6 +4598,7 @@
 vm_operations.cpp                       interfaceSupport.hpp
 vm_operations.cpp                       isGCActiveMark.hpp
 vm_operations.cpp                       resourceArea.hpp
+vm_operations.cpp                       sweeper.hpp
 vm_operations.cpp                       threadService.hpp
 vm_operations.cpp                       thread_<os_family>.inline.hpp
 vm_operations.cpp                       vmSymbols.hpp
--- old/src/share/vm/oops/methodOop.cpp	2010-01-13 16:08:21.000000000 -0800
+++ new/src/share/vm/oops/methodOop.cpp	2010-01-13 16:08:21.000000000 -0800
@@ -612,6 +612,16 @@
   backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
 }
 
+// Clear the code ptr during emergency code cache sweeping
+// It will be restored if it is actually called
+void methodOopDesc::clear_code_hedge() {
+  // should guarantee at safepoint
+  nmethod* tmp = code();
+  clear_code();
+  _saved_code = tmp;
+  assert( ! _saved_code->is_osr_method(), "should not get here for osr" );
+}
+
 // Revert to using the interpreter and clear out the nmethod
 void methodOopDesc::clear_code() {
@@ -626,6 +636,7 @@
   _from_interpreted_entry = _i2i_entry;
   OrderAccess::storestore();
   _code = NULL;
+  _saved_code = NULL;
 }
 
 // Called by class data sharing to remove any entry points (which are not shared)
@@ -705,6 +716,15 @@
 // This function must not hit a safepoint!
 address methodOopDesc::verified_code_entry() {
   debug_only(No_Safepoint_Verifier nsv;)
+  nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
+  nmethod *saved_code = (nmethod *)OrderAccess::load_ptr_acquire(&_saved_code);
+  if (( code == NULL) && (saved_code != NULL) && (saved_code->is_in_use())) {
+    methodHandle method(this);
+    assert(UseCodeCacheFlushing, "UseCodeCacheFlushing should be on");
+    assert( ! saved_code->is_osr_method(), "should not get here for osr" );
+    set_code( method, saved_code );
+  }
+
   assert(_from_compiled_entry != NULL, "must be set");
   return _from_compiled_entry;
 }
@@ -725,6 +745,8 @@
 
   guarantee(mh->adapter() != NULL, "Adapter blob must already exist!");
 
+  mh->set_saved_code(NULL);
+
   // These writes must happen in this order, because the interpreter will
   // directly jump to from_interpreted_entry which jumps to an i2c adapter
   // which jumps to _from_compiled_entry.
--- old/src/share/vm/oops/methodOop.hpp	2010-01-13 16:08:22.000000000 -0800
+++ new/src/share/vm/oops/methodOop.hpp	2010-01-13 16:08:22.000000000 -0800
@@ -126,6 +126,7 @@
   // time (whenever a compile completes).  It can transition from not-null to
   // NULL only at safepoints (because of a de-opt).
   nmethod* volatile _code;                       // Points to the corresponding piece of native code
+  nmethod* volatile _saved_code;                 // remember nmethod while we attempt to clear code cache space
   volatile address  _from_interpreted_entry;     // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
 
  public:
@@ -302,7 +303,10 @@
   address verified_code_entry();
   bool check_code() const;      // Not inline to avoid circular ref
   nmethod* volatile code() const { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
+  nmethod* saved_code() const        { return _saved_code; }
+  void set_saved_code(nmethod* code) { _saved_code = code; }
   void clear_code();            // Clear out any compiled code
+  void clear_code_hedge();      // Clear out any compiled code and save code ptr in attempt to clean up code cache
   void set_code(methodHandle mh, nmethod* code);
   void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; }
   address get_i2c_entry();
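
verified_code_entry() above is the reconnect path: if _code was cleared by clear_code_hedge() but _saved_code still points at a live nmethod, the first call through the method restores it. A simplified model of that save/restore pair, using std::atomic in place of OrderAccess (types and names are illustrative, not HotSpot's):

    #include <atomic>
    #include <cassert>

    struct Nmethod { bool in_use = true; };

    struct Method {
      std::atomic<Nmethod*> code{nullptr};        // models _code
      std::atomic<Nmethod*> saved_code{nullptr};  // models _saved_code

      // models clear_code_hedge(): hide the nmethod but remember it
      // (in HotSpot this runs at a safepoint, so there is no race here)
      void disconnect() {
        Nmethod* tmp = code.load(std::memory_order_acquire);
        code.store(nullptr, std::memory_order_release);
        saved_code.store(tmp, std::memory_order_release);
      }

      // models verified_code_entry(): a call restores a hidden nmethod
      Nmethod* entry() {
        Nmethod* c = code.load(std::memory_order_acquire);
        Nmethod* s = saved_code.load(std::memory_order_acquire);
        if (c == nullptr && s != nullptr && s->in_use) {
          code.store(s, std::memory_order_release);  // models set_code()
          c = s;
        }
        return c;  // NULL means: fall back to the interpreter entry
      }
    };

    int main() {
      Nmethod nm;
      Method m;
      m.code.store(&nm);
      m.disconnect();                   // sweeper hides the nmethod
      assert(m.code.load() == nullptr);
      assert(m.entry() == &nm);         // first call reconnects it
    }
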
--- old/src/share/vm/runtime/compilationPolicy.cpp	2010-01-13 16:08:23.000000000 -0800
+++ new/src/share/vm/runtime/compilationPolicy.cpp	2010-01-13 16:08:23.000000000 -0800
@@ -66,7 +66,7 @@
   if (!canBeCompiled(m))      return false;
 
   return !UseInterpreter ||                                   // must compile all methods
-         (UseCompiler && AlwaysCompileLoopMethods && m->has_loops()); // eagerly compile loop methods
+         (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
 }
 
 // Returns true if m is allowed to be compiled
@@ -127,7 +127,7 @@
   reset_counter_for_invocation_event(m);
   const char* comment = "count";
 
-  if (!delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler) {
+  if (!delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) {
     nmethod* nm = m->code();
     if (nm == NULL ) {
       const char* comment = "count";
@@ -152,7 +152,7 @@
   int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
 
-  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m)) {
+  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) {
     CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
 
     NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
@@ -194,7 +194,7 @@
   reset_counter_for_invocation_event(m);
   const char* comment = "count";
 
-  if (m->code() == NULL && !delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler) {
+  if (m->code() == NULL && !delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) {
     ResourceMark rm(THREAD);
     JavaThread *thread = (JavaThread*)THREAD;
     frame       fr     = thread->last_frame();
@@ -238,7 +238,7 @@
   int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
 
-  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m)) {
+  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) {
     CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
 
     NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
--- old/src/share/vm/runtime/globals.hpp	2010-01-13 16:08:24.000000000 -0800
+++ new/src/share/vm/runtime/globals.hpp	2010-01-13 16:08:24.000000000 -0800
@@ -3018,6 +3018,15 @@
   notproduct(bool, ExitOnFullCodeCache, false,                              \
           "Exit the VM if we fill the code cache.")                        \
                                                                            \
+  product(bool, UseCodeCacheFlushing, false,                               \
+          "Attempt to clean the code cache before shutting off compiler")  \
+                                                                           \
+  product(intx, MinCodeCacheFlushingInterval, 30,                          \
+          "Min number of seconds between code cache cleaning sessions")    \
+                                                                           \
+  product(uintx, CodeCacheFlushingMinimumFreeSpace, 1500*K,                \
+          "When less than X space left, start code cache cleaning")        \
+                                                                           \
   /* interpreter debugging */                                              \
   develop(intx, BinarySwitchThreshold, 5,                                  \
           "Minimal number of lookupswitch entries for rewriting to binary " \
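
The three new flags above are product flags, with flushing off by default. Assuming a VM built with this change, the feature would be switched on in the usual -XX form, for example:

    java -XX:+UseCodeCacheFlushing -XX:MinCodeCacheFlushingInterval=30 ...

MinCodeCacheFlushingInterval is in seconds; as the sweeper code later in the patch shows, it rate-limits emergency unloading: if the cache fills up again within that window of the previous cleaning, the compiler is left switched off rather than starting another cleaning pass.
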
--- old/src/share/vm/runtime/sharedRuntime.cpp	2010-01-13 16:08:26.000000000 -0800
+++ new/src/share/vm/runtime/sharedRuntime.cpp	2010-01-13 16:08:25.000000000 -0800
@@ -1903,19 +1903,8 @@
       // CodeCache is full, disable compilation
       // Ought to log this but compile log is only per compile thread
       // and we're some non descript Java thread.
-      UseInterpreter = true;
-      if (UseCompiler || AlwaysCompileLoopMethods ) {
-#ifndef PRODUCT
-        warning("CodeCache is full. Compiler has been disabled");
-        if (CompileTheWorld || ExitOnFullCodeCache) {
-          before_exit(JavaThread::current());
-          exit_globals(); // will delete tty
-          vm_direct_exit(CompileTheWorld ? 0 : 1);
-        }
-#endif
-        UseCompiler = false;
-        AlwaysCompileLoopMethods = false;
-      }
+      MutexUnlocker mu(AdapterHandlerLibrary_lock);
+      CompileBroker::handle_full_code_cache();
       return 0; // Out of CodeCache space (_handlers[0] == NULL)
     }
     entry->relocate(B->instructions_begin());
@@ -2044,19 +2033,8 @@
       // CodeCache is full, disable compilation
       // Ought to log this but compile log is only per compile thread
       // and we're some non descript Java thread.
-      UseInterpreter = true;
-      if (UseCompiler || AlwaysCompileLoopMethods ) {
-#ifndef PRODUCT
-        warning("CodeCache is full. Compiler has been disabled");
-        if (CompileTheWorld || ExitOnFullCodeCache) {
-          before_exit(JavaThread::current());
-          exit_globals(); // will delete tty
-          vm_direct_exit(CompileTheWorld ? 0 : 1);
-        }
-#endif
-        UseCompiler = false;
-        AlwaysCompileLoopMethods = false;
-      }
+      MutexUnlocker mu(AdapterHandlerLibrary_lock);
+      CompileBroker::handle_full_code_cache();
     }
   return nm;
 }
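
The sweeper changes that follow implement the life cycle described in the "Code cache unloading" block comment below. Restated compactly, each candidate nmethod moves through these states (a paraphrase of that comment, not VM code):

    // Paraphrase of the speculative-disconnect life cycle (not VM code).
    enum DisconnectState {
      connected,     // method->code() == nm: normal compiled execution
      disconnected,  // clear_code_hedge(): code() == NULL, saved_code() == nm;
                     //   calls fall back through verified_code_entry()
      reconnected,   // method was called: verified_code_entry() restored code()
      not_entrant    // not called within ~2 sweeper traversals:
                     //   make_not_entrant(), then ordinary sweeping reclaims it
    };
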
--- old/src/share/vm/runtime/sweeper.cpp	2010-01-13 16:08:27.000000000 -0800
+++ new/src/share/vm/runtime/sweeper.cpp	2010-01-13 16:08:27.000000000 -0800
@@ -33,6 +33,11 @@
 jint      NMethodSweeper::_locked_seen = 0;
 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
 bool      NMethodSweeper::_rescan = false;
+bool      NMethodSweeper::_was_full = false;
+jlong     NMethodSweeper::_advise_to_sweep = 0;
+jlong     NMethodSweeper::_last_was_full = 0;
+uint      NMethodSweeper::_highest_marked = 0;
+long      NMethodSweeper::_was_full_traversal = 0;
 
 class MarkActivationClosure: public CodeBlobClosure {
 public:
@@ -114,6 +119,34 @@
       tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
     }
   }
+
+  if (UseCodeCacheFlushing) {
+    if (CodeCache::unallocated_capacity() > CodeCacheFlushingMinimumFreeSpace) {
+      // In a safepoint, no race with setters
+      _advise_to_sweep = false;
+    }
+
+    if (was_full()) {
+      // There was some progress so attempt to restart the compiler
+      jlong now           = os::javaTimeMillis();
+      jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
+      jlong curr_interval = now - _last_was_full;
+      if ((CodeCache::unallocated_capacity() > CodeCacheFlushingMinimumFreeSpace) &&
+          (curr_interval > max_interval)) {
+        CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
+        set_was_full(false);
+
+        // Update the _last_was_full time so we can tell how fast the
+        // code cache is filling up
+        _last_was_full = os::javaTimeMillis();
+
+        if (PrintMethodFlushing) {
+          tty->print_cr("### sweeper: " UINT32_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT " restarting compiler",
+                        CodeCache::nof_blobs(), CodeCache::unallocated_capacity(), CodeCache::max_capacity());
+        }
+      }
+    }
+  }
 }
@@ -177,7 +210,132 @@
     }
   } else {
     assert(nm->is_alive(), "should be alive");
+
+    if (UseCodeCacheFlushing) {
+      if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
+          (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
+          (CodeCache::unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace)) {
+        // This method has not been called since the forced cleanup happened
+        nm->make_not_entrant();
+        nm->method()->set_saved_code(NULL);
+      }
+    }
+
     // Clean-up all inline caches that points to zombie/non-reentrant methods
     nm->cleanup_inline_caches();
   }
 }
+
+// Code cache unloading: when compilers notice the code cache is getting full,
+// they will call a vm op that comes here. This code attempts to speculatively
+// unload the oldest half of the nmethods (based on the compile job id) by
+// hiding the methodOop's ref to the nmethod in the _saved_code field. Then
+// execution resumes. If a method so marked is not called by the second
+// safepoint from the current one, the nmethod will be marked non-entrant and
+// gotten rid of by normal sweeping. If the method is called, the methodOop's
+// _code field is restored from the _saved_code field and the methodOop/nmethod
+// go back to their normal state.
+void NMethodSweeper::handle_full_code_cache(bool is_full) {
+  // Only the first one to notice can advise us to start early cleaning
+  if (!is_full){
+    jlong old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
+    if (old != 0) {
+      return;
+    }
+  }
+
+  if (is_full) {
+    // Since code cache is full, immediately stop new compiles
+    bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
+    if (!did_set) {
+      // only the first to notice can start the cleaning,
+      // others will go back and block
+      return;
+    }
+    set_was_full(true);
+
+    // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
+    jlong now           = os::javaTimeMillis();
+    jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
+    jlong curr_interval = now - _last_was_full;
+    if (curr_interval < max_interval) {
+      _rescan = true;
+      if (PrintMethodFlushing) {
+        tty->print_cr("### handle full too often, turning off compiler");
+      }
+      return;
+    }
+  }
+
+  VM_HandleFullCodeCache op(is_full);
+  VMThread::execute(&op);
+
+  // rescan again as soon as possible
+  _rescan = true;
+}
+
+void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
+  // If there was a race in detecting full code cache, only run
+  // one vm op for it or keep the compiler shut off
+
+  debug_only(jlong start = os::javaTimeMillis();)
+
+  if ((!was_full()) && (is_full)) {
+    if (CodeCache::unallocated_capacity() > CodeCacheFlushingMinimumFreeSpace) {
+      if (PrintMethodFlushing) {
+        tty->print_cr("### sweeper: " UINT32_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT " restarting compiler",
+                      CodeCache::nof_blobs(), CodeCache::unallocated_capacity(), CodeCache::max_capacity());
+      }
+      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
+      return;
+    }
+  }
+
+  // Traverse the code cache trying to dump the oldest nmethods
+  uint curr_max_comp_id = CompileBroker::get_compilation_id();
+  uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
+  if (PrintMethodFlushing) {
+    tty->print_cr("### Cleaning code cache: " UINT32_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
+                  CodeCache::nof_blobs(), CodeCache::unallocated_capacity(), CodeCache::max_capacity());
+  }
+
+  nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
+
+  while ((nm != NULL)){
+    uint curr_comp_id = nm->compile_id();
+
+    // OSR methods cannot be flushed like this. Also, don't flush native methods
+    // since they are part of the JDK in most cases
+    if(nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
+       (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {
+
+      if ((nm->method()->code() == nm)) {
+        // This method has not been previously considered for
+        // unloading or it was restored already
+        nm->method()->clear_code_hedge();
+      } else if (nm->method()->saved_code() == nm) {
+        // This method was previously considered for preemptive unloading and was not called since then
+        nm->method()->set_saved_code(NULL);
+        nm->method()->invocation_counter()->decay();
+        nm->method()->backedge_counter()->decay();
+        nm->make_not_entrant();
+      }
+
+      if (curr_comp_id > _highest_marked) {
+        _highest_marked = curr_comp_id;
+      }
+    }
+    nm = CodeCache::alive_nmethod(CodeCache::next(nm));
+  }
+
+  // Shut off compiler. Sweeper will run exiting from this safepoint
+  // and turn it back on if it clears enough space
+  if (was_full()) {
+    _last_was_full = os::javaTimeMillis();
+    CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
+  }
+
+  // After two more traversals the sweeper will get rid of unrestored nmethods
+  _was_full_traversal = _traversals;
+  debug_only(jlong end = os::javaTimeMillis(); if(PrintMethodFlushing) tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);)
+}
--- old/src/share/vm/runtime/sweeper.hpp	2010-01-13 16:08:28.000000000 -0800
+++ new/src/share/vm/runtime/sweeper.hpp	2010-01-13 16:08:28.000000000 -0800
@@ -38,6 +38,11 @@
   static int       _locked_seen;                // Number of locked nmethods encountered during the scan
   static int       _not_entrant_seen_on_stack;  // Number of not entrant nmethod were are still on stack
+  static bool      _was_full;                   // remember if we did emergency unloading
+  static jlong     _advise_to_sweep;            // flag to indicate code cache getting full
+  static jlong     _last_was_full;              // timestamp of last emergency unloading
+  static uint      _highest_marked;             // highest compile id dumped at last emergency unloading
+  static long      _was_full_traversal;         // trav number at last emergency unloading
 
   static void process_nmethod(nmethod *nm);
  public:
@@ -51,4 +56,10 @@
     // changes to false at safepoint so we can never overwrite it with false.
     _rescan = true;
   }
+
+  static void handle_full_code_cache(bool is_full); // Called by compilers who fail to allocate
+  static void speculative_disconnect_nmethods(bool was_full); // Called by vm op to deal with alloc failure
+
+  static void set_was_full(bool state) { _was_full = state; }
+  static bool was_full() { return _was_full; }
 };
--- old/src/share/vm/runtime/vm_operations.cpp	2010-01-13 16:08:29.000000000 -0800
+++ new/src/share/vm/runtime/vm_operations.cpp	2010-01-13 16:08:29.000000000 -0800
@@ -151,6 +151,10 @@
 }
 #endif // !PRODUCT
 
+void VM_HandleFullCodeCache::doit() {
+  NMethodSweeper::speculative_disconnect_nmethods(_is_full);
+}
+
 void VM_Verify::doit() {
   Universe::verify();
 }
--- old/src/share/vm/runtime/vm_operations.hpp	2010-01-13 16:08:30.000000000 -0800
+++ new/src/share/vm/runtime/vm_operations.hpp	2010-01-13 16:08:30.000000000 -0800
@@ -41,6 +41,7 @@
   template(DeoptimizeFrame)                      \
   template(DeoptimizeAll)                        \
   template(ZombieAll)                            \
+  template(HandleFullCodeCache)                  \
   template(Verify)                               \
   template(PrintJNI)                             \
   template(HeapDumper)                           \
@@ -241,6 +242,16 @@
   bool allow_nested_vm_operations() const        { return true; }
 };
 
+class VM_HandleFullCodeCache: public VM_Operation {
+ private:
+  bool _is_full;
+ public:
+  VM_HandleFullCodeCache(bool is_full)           { _is_full = is_full; }
+  VMOp_Type type() const                         { return VMOp_HandleFullCodeCache; }
+  void doit();
+  bool allow_nested_vm_operations() const        { return true; }
+};
+
 #ifndef PRODUCT
 class VM_DeoptimizeAll: public VM_Operation {
  private:
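
Taken together, the pieces wire up as follows: an allocation failure in ciEnv or sharedRuntime calls CompileBroker::handle_full_code_cache(), which (under UseCodeCacheFlushing) calls NMethodSweeper::handle_full_code_cache(true); that stops new compiles via the CAS toggle and runs the VM_HandleFullCodeCache operation, whose doit() invokes speculative_disconnect_nmethods() at a safepoint. The disconnect pass targets roughly the older half of the compile-id range; the midpoint arithmetic from speculative_disconnect_nmethods(), extracted into a checkable form (a sketch for illustration, not VM code):

    #include <cstdio>

    // flush_target from speculative_disconnect_nmethods(): nmethods with a
    // compile id below the midpoint of (_highest_marked, current id] are
    // candidates for disconnection.
    unsigned flush_target(unsigned curr_max_comp_id, unsigned highest_marked) {
      return ((curr_max_comp_id - highest_marked) >> 1) + highest_marked;
    }

    int main() {
      // e.g. the last emergency unload marked ids up to 100 and the broker
      // has since handed out id 500: everything below 300 is a candidate.
      printf("%u\n", flush_target(500, 100));  // prints 300
    }
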