src/share/vm/runtime/sweeper.cpp


rev 7390 : 8064669: compiler/whitebox/AllocationCodeBlobTest.java crashes / asserts
Reviewed-by: kvn, anoll

*** 140,152 ****
  long NMethodSweeper::_traversals                    = 0;    // Stack scan count, also sweep ID.
  long NMethodSweeper::_total_nof_code_cache_sweeps   = 0;    // Total number of full sweeps of the code cache
  long NMethodSweeper::_time_counter                  = 0;    // Virtual time used to periodically invoke sweeper
  long NMethodSweeper::_last_sweep                    = 0;    // Value of _time_counter when the last sweep happened
  int  NMethodSweeper::_seen                          = 0;    // Nof. nmethod we have currently processed in current pass of CodeCache
- int  NMethodSweeper::_flushed_count                 = 0;    // Nof. nmethods flushed in current sweep
- int  NMethodSweeper::_zombified_count               = 0;    // Nof. nmethods made zombie in current sweep
- int  NMethodSweeper::_marked_for_reclamation_count  = 0;    // Nof. nmethods marked for reclaim in current sweep

  volatile bool NMethodSweeper::_should_sweep         = true; // Indicates if we should invoke the sweeper
  volatile int  NMethodSweeper::_bytes_changed        = 0;    // Counts the total nmethod size if the nmethod changed from:
                                                              //   1) alive       -> not_entrant
                                                              //   2) not_entrant -> zombie
--- 140,149 ----
*** 159,168 ****
--- 156,166 ----
  Tickspan NMethodSweeper::_total_time_sweeping;               // Accumulated time sweeping
  Tickspan NMethodSweeper::_total_time_this_sweep;             // Total time this sweep
  Tickspan NMethodSweeper::_peak_sweep_time;                   // Peak time for a full sweep
  Tickspan NMethodSweeper::_peak_sweep_fraction_time;          // Peak time sweeping one fraction
+ Monitor* NMethodSweeper::_stat_lock = new Monitor(Mutex::special, "Sweeper::Statistics", true);

  class MarkActivationClosure: public CodeBlobClosure {
  public:
    virtual void do_code_blob(CodeBlob* cb) {
      assert(cb->is_nmethod(), "CodeBlob should be nmethod");
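The new _stat_lock serializes updates to the sweeper statistics, which could previously be corrupted when more than one sweep updated them concurrently (as the whitebox test in 8064669 can provoke). The following is a minimal standalone sketch of that pattern, not HotSpot code: std::mutex and the made-up names SweepTotals and publish_sweep_stats stand in for the VM Monitor and the NMethodSweeper fields, purely to illustrate "accumulate into locals, publish once under the lock".

#include <mutex>

// Illustration only: std::mutex stands in for the Monitor created as
// NMethodSweeper::_stat_lock, SweepTotals for the shared statistics fields.
struct SweepTotals {
  long total_flushed_size         = 0;
  long total_methods_reclaimed    = 0;
  long total_c2_methods_reclaimed = 0;
};

static std::mutex  stat_lock;   // plays the role of _stat_lock
static SweepTotals totals;      // shared across all sweeps

// Per-sweep counts arrive as plain locals from the caller; the shared
// totals are touched only here, under the lock, so two concurrent sweeps
// cannot interleave their read-modify-write updates.
void publish_sweep_stats(int freed_memory, int flushed_count, int flushed_c2_count) {
  std::lock_guard<std::mutex> guard(stat_lock);
  totals.total_flushed_size         += freed_memory;
  totals.total_methods_reclaimed    += flushed_count;
  totals.total_c2_methods_reclaimed += flushed_c2_count;
}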
*** 368,380 ****
  void NMethodSweeper::sweep_code_cache() {
    ResourceMark rm;
    Ticks sweep_start_counter = Ticks::now();

!   _flushed_count                = 0;
!   _zombified_count              = 0;
!   _marked_for_reclamation_count = 0;

    if (PrintMethodFlushing && Verbose) {
      tty->print_cr("### Sweep at %d out of %d", _seen, CodeCache::nof_nmethods());
    }
--- 366,379 ----
  void NMethodSweeper::sweep_code_cache() {
    ResourceMark rm;
    Ticks sweep_start_counter = Ticks::now();

!   int flushed_count                = 0;
!   int zombified_count              = 0;
!   int marked_for_reclamation_count = 0;
!   int flushed_c2_count             = 0;

    if (PrintMethodFlushing && Verbose) {
      tty->print_cr("### Sweep at %d out of %d", _seen, CodeCache::nof_nmethods());
    }
*** 384,441 ****
    int freed_memory = 0;
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-     // The last invocation iterates until there are no more nmethods
      while (!_current.end()) {
        swept_count++;
-       handle_safepoint_request();
        // Since we will give up the CodeCache_lock, always skip ahead
        // to the next nmethod. Other blobs can be deleted by other
        // threads but nmethods are only reclaimed by the sweeper.
        nmethod* nm = _current.method();
        _current.next();

        // Now ready to process nmethod and give up CodeCache_lock
        {
          MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
!         freed_memory += process_nmethod(nm);
        }
        _seen++;
      }
    }

    assert(_current.end(), "must have scanned the whole cache");

    const Ticks sweep_end_counter = Ticks::now();
    const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
    _total_time_sweeping  += sweep_time;
    _total_time_this_sweep += sweep_time;
    _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
    _total_flushed_size += freed_memory;
!   _total_nof_methods_reclaimed += _flushed_count;
!
    EventSweepCodeCache event(UNTIMED);
    if (event.should_commit()) {
      event.set_starttime(sweep_start_counter);
      event.set_endtime(sweep_end_counter);
      event.set_sweepIndex(_traversals);
      event.set_sweptCount(swept_count);
!     event.set_flushedCount(_flushed_count);
!     event.set_markedCount(_marked_for_reclamation_count);
!     event.set_zombifiedCount(_zombified_count);
      event.commit();
    }

  #ifdef ASSERT
    if(PrintMethodFlushing) {
      tty->print_cr("### sweeper: sweep time(%d): ", (jlong)sweep_time.value());
    }
  #endif

-   _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
    log_sweep("finished");

    // Sweeper is the only case where memory is released, check here if it
    // is time to restart the compiler. Only checking if there is a certain
    // amount of free memory in the code cache might lead to re-enabling
--- 383,464 ----
    int freed_memory = 0;
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      while (!_current.end()) {
        swept_count++;
        // Since we will give up the CodeCache_lock, always skip ahead
        // to the next nmethod. Other blobs can be deleted by other
        // threads but nmethods are only reclaimed by the sweeper.
        nmethod* nm = _current.method();
        _current.next();

        // Now ready to process nmethod and give up CodeCache_lock
        {
          MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
!         int size = nm->total_size();
!         bool is_c2_method = nm->is_compiled_by_c2();
!
!         MethodStateChange type = process_nmethod(nm);
!         switch (type) {
!           case Flushed:
!             freed_memory += size;
!             ++flushed_count;
!             if (is_c2_method) {
!               ++flushed_c2_count;
!             }
!             break;
!           case MarkedForReclamation:
!             ++marked_for_reclamation_count;
!             break;
!           case MadeZombie:
!             ++zombified_count;
!             break;
!           case None:
!             break;
!           default:
!             ShouldNotReachHere();
!         }
        }
        _seen++;
+       handle_safepoint_request();
      }
    }

    assert(_current.end(), "must have scanned the whole cache");

    const Ticks sweep_end_counter = Ticks::now();
    const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
+   {
+     MutexLockerEx mu(_stat_lock, Mutex::_no_safepoint_check_flag);
      _total_time_sweeping  += sweep_time;
      _total_time_this_sweep += sweep_time;
      _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
      _total_flushed_size += freed_memory;
!     _total_nof_methods_reclaimed += flushed_count;
!     _total_nof_c2_methods_reclaimed += flushed_c2_count;
!     _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
!   }

    EventSweepCodeCache event(UNTIMED);
    if (event.should_commit()) {
      event.set_starttime(sweep_start_counter);
      event.set_endtime(sweep_end_counter);
      event.set_sweepIndex(_traversals);
      event.set_sweptCount(swept_count);
!     event.set_flushedCount(flushed_count);
!     event.set_markedCount(marked_for_reclamation_count);
!     event.set_zombifiedCount(zombified_count);
      event.commit();
    }

  #ifdef ASSERT
    if(PrintMethodFlushing) {
      tty->print_cr("### sweeper: sweep time(%d): ", (jlong)sweep_time.value());
    }
  #endif

    log_sweep("finished");

    // Sweeper is the only case where memory is released, check here if it
    // is time to restart the compiler. Only checking if there is a certain
    // amount of free memory in the code cache might lead to re-enabling
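The switch above tallies the MethodStateChange value returned by process_nmethod(). That enum is declared in sweeper.hpp, which is not part of this webrev; judging from the names used here, the header additions presumably look roughly like the sketch below. The enumerator names, the process_nmethod() signature, and the _stat_lock type are taken from the diff; their order and placement in the class are assumptions.

// Assumed shape of the sweeper.hpp additions (header not shown in this webrev).
class NMethodSweeper : public AllStatic {
 public:
  enum MethodStateChange {
    None,
    MadeZombie,
    MarkedForReclamation,
    Flushed
  };
 private:
  static MethodStateChange process_nmethod(nmethod* nm);
  static Monitor*          _stat_lock;   // guards the sweeper statistics
  // ... remaining members unchanged ...
};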
*** 509,522 ****
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    nm->flush();
  }

! int NMethodSweeper::process_nmethod(nmethod* nm) {
    assert(!CodeCache_lock->owned_by_self(), "just checking");

!   int freed_memory = 0;
    // Make sure this nmethod doesn't get unloaded during the scan,
    // since safepoints may happen during acquired below locks.
    NMethodMarker nmm(nm);
    SWEEP(nm);
--- 532,546 ----
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    nm->flush();
  }

! NMethodSweeper::MethodStateChange NMethodSweeper::process_nmethod(nmethod* nm) {
!   assert(nm != NULL, "sanity");
    assert(!CodeCache_lock->owned_by_self(), "just checking");

!   MethodStateChange result = None;
    // Make sure this nmethod doesn't get unloaded during the scan,
    // since safepoints may happen during acquired below locks.
    NMethodMarker nmm(nm);
    SWEEP(nm);
*** 527,537 ****
        // Clean inline caches that point to zombie/non-entrant methods
        MutexLocker cl(CompiledIC_lock);
        nm->cleanup_inline_caches();
        SWEEP(nm);
      }
!     return freed_memory;
    }

    if (nm->is_zombie()) {
      // If it is the first time we see nmethod then we mark it. Otherwise,
      // we reclaim it. When we have seen a zombie method twice, we know that
--- 551,561 ----
        // Clean inline caches that point to zombie/non-entrant methods
        MutexLocker cl(CompiledIC_lock);
        nm->cleanup_inline_caches();
        SWEEP(nm);
      }
!     return result;
    }

    if (nm->is_zombie()) {
      // If it is the first time we see nmethod then we mark it. Otherwise,
      // we reclaim it. When we have seen a zombie method twice, we know that
*** 539,563 ****
      if (nm->is_marked_for_reclamation()) {
        assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
        }
-       freed_memory = nm->total_size();
-       if (nm->is_compiled_by_c2()) {
-         _total_nof_c2_methods_reclaimed++;
-       }
        release_nmethod(nm);
!       _flushed_count++;
      } else {
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
        }
        nm->mark_for_reclamation();
        // Keep track of code cache state change
        _bytes_changed += nm->total_size();
-       _marked_for_reclamation_count++;
        SWEEP(nm);
      }
    } else if (nm->is_not_entrant()) {
      // If there are no current activations of this method on the
      // stack we can safely convert it to a zombie method
      if (nm->can_not_entrant_be_converted()) {
--- 563,585 ----
      if (nm->is_marked_for_reclamation()) {
        assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
        }
        release_nmethod(nm);
!       assert(result == None, "sanity");
!       result = Flushed;
      } else {
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
        }
        nm->mark_for_reclamation();
        // Keep track of code cache state change
        _bytes_changed += nm->total_size();
        SWEEP(nm);
+       assert(result == None, "sanity");
+       result = MarkedForReclamation;
      }
    } else if (nm->is_not_entrant()) {
      // If there are no current activations of this method on the
      // stack we can safely convert it to a zombie method
      if (nm->can_not_entrant_be_converted()) {
*** 574,585 ****
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
        }
        // Code cache state change is tracked in make_zombie()
        nm->make_zombie();
-       _zombified_count++;
        SWEEP(nm);
      }
      assert(nm->is_zombie(), "nmethod must be zombie");
    } else {
      // Still alive, clean up its inline caches
      MutexLocker cl(CompiledIC_lock);
--- 596,608 ----
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
        }
        // Code cache state change is tracked in make_zombie()
        nm->make_zombie();
        SWEEP(nm);
+       assert(result == None, "sanity");
+       result = MadeZombie;
      }
      assert(nm->is_zombie(), "nmethod must be zombie");
    } else {
      // Still alive, clean up its inline caches
      MutexLocker cl(CompiledIC_lock);
*** 592,621 ****
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
      }
      if (nm->is_osr_method()) {
        SWEEP(nm);
        // No inline caches will ever point to osr methods, so we can just remove it
-       freed_memory = nm->total_size();
-       if (nm->is_compiled_by_c2()) {
-         _total_nof_c2_methods_reclaimed++;
-       }
        release_nmethod(nm);
!       _flushed_count++;
      } else {
        // Code cache state change is tracked in make_zombie()
        nm->make_zombie();
-       _zombified_count++;
        SWEEP(nm);
      }
    } else {
      possibly_flush(nm);
      // Clean-up all inline caches that point to zombie/non-reentrant methods
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      SWEEP(nm);
    }
!   return freed_memory;
  }

  void NMethodSweeper::possibly_flush(nmethod* nm) {
    if (UseCodeCacheFlushing) {
--- 615,642 ----
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
      }
      if (nm->is_osr_method()) {
        SWEEP(nm);
        // No inline caches will ever point to osr methods, so we can just remove it
        release_nmethod(nm);
!       assert(result == None, "sanity");
!       result = Flushed;
      } else {
        // Code cache state change is tracked in make_zombie()
        nm->make_zombie();
        SWEEP(nm);
+       assert(result == None, "sanity");
+       result = MadeZombie;
      }
    } else {
      possibly_flush(nm);
      // Clean-up all inline caches that point to zombie/non-reentrant methods
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      SWEEP(nm);
    }
!   return result;
  }

  void NMethodSweeper::possibly_flush(nmethod* nm) {
    if (UseCodeCacheFlushing) {
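The shape of the process_nmethod() change: every exit path used to bump a shared static counter, and now each path records its outcome once in a local result while the single caller does all the counting. Below is a small standalone C++ sketch of that refactoring pattern; FakeMethod and process are hypothetical names for illustration only, and only the enum mirrors the diff.

#include <cstdio>

// Hypothetical stand-ins; only the enum mirrors the names in the diff.
enum MethodStateChange { None, MadeZombie, MarkedForReclamation, Flushed };

struct FakeMethod { bool zombie; bool marked; };

// Every path assigns 'result' exactly once and returns it; no shared
// counters are touched here, so the function itself has no update races.
MethodStateChange process(FakeMethod* m) {
  MethodStateChange result = None;
  if (m->zombie) {
    if (m->marked) {
      result = Flushed;               // second visit: reclaim
    } else {
      m->marked = true;               // first visit: mark for reclamation
      result = MarkedForReclamation;
    }
  }
  return result;
}

int main() {
  FakeMethod m = { true, false };
  int marked = 0, flushed = 0;        // caller-side tallies, as in sweep_code_cache()
  for (int pass = 0; pass < 2; ++pass) {
    switch (process(&m)) {
      case MarkedForReclamation: ++marked;  break;
      case Flushed:              ++flushed; break;
      default:                   break;
    }
  }
  std::printf("marked=%d flushed=%d\n", marked, flushed);   // prints: marked=1 flushed=1
  return 0;
}

Keeping the tallies in the caller is what allows them to become locals and be published once under _stat_lock, rather than mutating static counters from many places.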