26 #include "code/codeCache.hpp"
27 #include "code/compiledIC.hpp"
28 #include "code/icBuffer.hpp"
29 #include "code/nmethod.hpp"
30 #include "compiler/compileBroker.hpp"
31 #include "memory/resourceArea.hpp"
32 #include "oops/method.hpp"
33 #include "runtime/atomic.inline.hpp"
34 #include "runtime/compilationPolicy.hpp"
35 #include "runtime/mutexLocker.hpp"
36 #include "runtime/orderAccess.inline.hpp"
37 #include "runtime/os.hpp"
38 #include "runtime/sweeper.hpp"
39 #include "runtime/thread.inline.hpp"
40 #include "runtime/vm_operations.hpp"
41 #include "trace/tracing.hpp"
42 #include "utilities/events.hpp"
43 #include "utilities/ticks.inline.hpp"
44 #include "utilities/xmlstream.hpp"
45
46 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
47
48 #ifdef ASSERT
49
50 #define SWEEP(nm) record_sweep(nm, __LINE__)
51 // Sweeper logging code
52 class SweeperRecord {
53 public:
54 int traversal;
55 int compile_id;
56 long traversal_mark;
57 int state;
58 const char* kind;
59 address vep;
60 address uep;
61 int line;
62
63 void print() {
64 tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
65 PTR_FORMAT " state = %d traversal_mark %d line = %d",
66 traversal,
67 compile_id,
68 kind == NULL ? "" : kind,
69 uep,
70 vep,
71 state,
72 traversal_mark,
73 line);
74 }
75 };
76
77 static int _sweep_index = 0;
78 static SweeperRecord* _records = NULL;
79
80 void NMethodSweeper::report_events(int id, address entry) {
81 if (_records != NULL) {
82 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
83 if (_records[i].uep == entry ||
84 _records[i].vep == entry ||
85 _records[i].compile_id == id) {
86 _records[i].print();
87 }
88 }
89 for (int i = 0; i < _sweep_index; i++) {
90 if (_records[i].uep == entry ||
206 // If we do not want to reclaim not-entrant or zombie methods there is no need
207 // to scan stacks
208 if (!MethodFlushing) {
209 return;
210 }
211
212 // Increase time so that we can estimate when to invoke the sweeper again.
213 _time_counter++;
214
215 // Check for restart
216 assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
217 if (wait_for_stack_scanning()) {
218 _seen = 0;
219 _current = NMethodIterator();
220 // Initialize to first nmethod
221 _current.next();
222 _traversals += 1;
223 _total_time_this_sweep = Tickspan();
224
225 if (PrintMethodFlushing) {
226 tty->print_cr("### Sweep: stack traversal %d", _traversals);
227 }
228 Threads::nmethods_do(&mark_activation_closure);
229
230 } else {
231 // Only set hotness counter
232 Threads::nmethods_do(&set_hotness_closure);
233 }
234
235 OrderAccess::storestore();
236 }
237
238 /**
239 * This function triggers a VM operation that does stack scanning of active
240 * methods. Stack scanning is mandatory for the sweeper to make progress.
241 */
242 void NMethodSweeper::do_stack_scanning() {
243 assert(!CodeCache_lock->owned_by_self(), "just checking");
244 if (wait_for_stack_scanning()) {
245 VM_MarkActiveNMethods op;
246 VMThread::execute(&op);
465 _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
466 _total_flushed_size += freed_memory;
467 _total_nof_methods_reclaimed += flushed_count;
468 _total_nof_c2_methods_reclaimed += flushed_c2_count;
469 _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
470 }
471 EventSweepCodeCache event(UNTIMED);
472 if (event.should_commit()) {
473 event.set_starttime(sweep_start_counter);
474 event.set_endtime(sweep_end_counter);
475 event.set_sweepIndex(_traversals);
476 event.set_sweptCount(swept_count);
477 event.set_flushedCount(flushed_count);
478 event.set_markedCount(marked_for_reclamation_count);
479 event.set_zombifiedCount(zombified_count);
480 event.commit();
481 }
482
483 #ifdef ASSERT
484 if(PrintMethodFlushing) {
485 tty->print_cr("### sweeper: sweep time(%d): ", (jlong)sweep_time.value());
486 }
487 #endif
488
489 log_sweep("finished");
490
491 // Sweeper is the only case where memory is released, check here if it
492 // is time to restart the compiler. Only checking if there is a certain
493 // amount of free memory in the code cache might lead to re-enabling
494 // compilation although no memory has been released. For example, there are
495 // cases when compilation was disabled although there is 4MB (or more) free
496 // memory in the code cache. The reason is code cache fragmentation. Therefore,
497 // it only makes sense to re-enable compilation if we have actually freed memory.
498 // Note that typically several kB are released for sweeping 16MB of the code
499 // cache. As a result, 'freed_memory' > 0 to restart the compiler.
500 if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) {
501 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
502 log_sweep("restart_compiler");
503 }
504 }
505
575
576 // Skip methods that are currently referenced by the VM
577 if (nm->is_locked_by_vm()) {
578 // But still remember to clean-up inline caches for alive nmethods
579 if (nm->is_alive()) {
580 // Clean inline caches that point to zombie/non-entrant methods
581 MutexLocker cl(CompiledIC_lock);
582 nm->cleanup_inline_caches();
583 SWEEP(nm);
584 }
585 return result;
586 }
587
588 if (nm->is_zombie()) {
589 // If it is the first time we see nmethod then we mark it. Otherwise,
590 // we reclaim it. When we have seen a zombie method twice, we know that
591 // there are no inline caches that refer to it.
592 if (nm->is_marked_for_reclamation()) {
593 assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
594 if (PrintMethodFlushing && Verbose) {
595 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
596 }
597 release_nmethod(nm);
598 assert(result == None, "sanity");
599 result = Flushed;
600 } else {
601 if (PrintMethodFlushing && Verbose) {
602 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
603 }
604 nm->mark_for_reclamation();
605 // Keep track of code cache state change
606 _bytes_changed += nm->total_size();
607 SWEEP(nm);
608 assert(result == None, "sanity");
609 result = MarkedForReclamation;
610 }
611 } else if (nm->is_not_entrant()) {
612 // If there are no current activations of this method on the
613 // stack we can safely convert it to a zombie method
614 if (nm->can_convert_to_zombie()) {
615 // Clear ICStubs to prevent back patching stubs of zombie or unloaded
616 // nmethods during the next safepoint (see ICStub::finalize).
617 {
618 MutexLocker cl(CompiledIC_lock);
619 nm->clear_ic_stubs();
620 }
621 if (PrintMethodFlushing && Verbose) {
622 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
623 }
624 // Code cache state change is tracked in make_zombie()
625 nm->make_zombie();
626 SWEEP(nm);
627 assert(result == None, "sanity");
628 result = MadeZombie;
629 assert(nm->is_zombie(), "nmethod must be zombie");
630 } else {
631 // Still alive, clean up its inline caches
632 MutexLocker cl(CompiledIC_lock);
633 nm->cleanup_inline_caches();
634 SWEEP(nm);
635 }
636 } else if (nm->is_unloaded()) {
637 // Unloaded code, just make it a zombie
638 if (PrintMethodFlushing && Verbose) {
639 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
640 }
641 if (nm->is_osr_method()) {
642 SWEEP(nm);
643 // No inline caches will ever point to osr methods, so we can just remove it
644 release_nmethod(nm);
645 assert(result == None, "sanity");
646 result = Flushed;
647 } else {
648 {
649 // Clean ICs of unloaded nmethods as well because they may reference other
650 // unloaded nmethods that may be flushed earlier in the sweeper cycle.
651 MutexLocker cl(CompiledIC_lock);
652 nm->cleanup_inline_caches();
653 }
654 // Code cache state change is tracked in make_zombie()
655 nm->make_zombie();
656 SWEEP(nm);
657 assert(result == None, "sanity");
658 result = MadeZombie;
659 }
726 make_not_entrant = false;
727 } else if (MethodCounters::is_nmethod_age_unset(age)) {
728 // No counters were used before. Set the counters to the detection
729 // limit value. If the method is going to be used again it will be compiled
730 // with counters that we're going to use for analysis the the next time.
731 mc->reset_nmethod_age();
732 } else {
733 // Method was totally idle for 10 sweeps
734 // The counter already has the initial value, flush it and may be recompile
735 // later with counters
736 }
737 }
738 }
739
740 if (make_not_entrant) {
741 nm->make_not_entrant();
742
743 // Code cache state change is tracked in make_not_entrant()
744 if (PrintMethodFlushing && Verbose) {
745 tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f",
746 nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold);
747 }
748 }
749 }
750 }
751 }
752
753 // Print out some state information about the current sweep and the
754 // state of the code cache if it's requested.
755 void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
756 if (PrintMethodFlushing) {
757 ResourceMark rm;
758 stringStream s;
759 // Dump code cache state into a buffer before locking the tty,
760 // because log_state() will use locks causing lock conflicts.
761 CodeCache::log_state(&s);
762
763 ttyLocker ttyl;
764 tty->print("### sweeper: %s ", msg);
765 if (format != NULL) {
766 va_list ap;
|
26 #include "code/codeCache.hpp"
27 #include "code/compiledIC.hpp"
28 #include "code/icBuffer.hpp"
29 #include "code/nmethod.hpp"
30 #include "compiler/compileBroker.hpp"
31 #include "memory/resourceArea.hpp"
32 #include "oops/method.hpp"
33 #include "runtime/atomic.inline.hpp"
34 #include "runtime/compilationPolicy.hpp"
35 #include "runtime/mutexLocker.hpp"
36 #include "runtime/orderAccess.inline.hpp"
37 #include "runtime/os.hpp"
38 #include "runtime/sweeper.hpp"
39 #include "runtime/thread.inline.hpp"
40 #include "runtime/vm_operations.hpp"
41 #include "trace/tracing.hpp"
42 #include "utilities/events.hpp"
43 #include "utilities/ticks.inline.hpp"
44 #include "utilities/xmlstream.hpp"
45
46 #ifdef ASSERT
47
48 #define SWEEP(nm) record_sweep(nm, __LINE__)
49 // Sweeper logging code
// Record of a single sweeper action on an nmethod; ASSERT-only debugging
// aid used to reconstruct the sweeper's recent history (see SWEEP macro).
// Fields are plain public data filled in by the recording site.
50 class SweeperRecord {
51 public:
52 int traversal; // sweep traversal count when the event was recorded
53 int compile_id; // compile id of the affected nmethod
54 long traversal_mark; // nmethod's stack-traversal mark at record time
55 int state; // nmethod state at record time
56 const char* kind; // event description (may be NULL)
57 address vep; // verified entry point of the nmethod
58 address uep; // unverified entry point of the nmethod
59 int line; // __LINE__ in sweeper.cpp that recorded the event
60
// Dump this record to tty. traversal_mark is 'long' so it is printed
// with "%ld"; addresses go through p2i() to match PTR_FORMAT.
61 void print() {
62 tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
63 PTR_FORMAT " state = %d traversal_mark %ld line = %d",
64 traversal,
65 compile_id,
66 kind == NULL ? "" : kind, // guard against a NULL kind string
67 p2i(uep),
68 p2i(vep),
69 state,
70 traversal_mark,
71 line);
72 }
73 };
74
75 static int _sweep_index = 0;
76 static SweeperRecord* _records = NULL;
77
78 void NMethodSweeper::report_events(int id, address entry) {
79 if (_records != NULL) {
80 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
81 if (_records[i].uep == entry ||
82 _records[i].vep == entry ||
83 _records[i].compile_id == id) {
84 _records[i].print();
85 }
86 }
87 for (int i = 0; i < _sweep_index; i++) {
88 if (_records[i].uep == entry ||
204 // If we do not want to reclaim not-entrant or zombie methods there is no need
205 // to scan stacks
206 if (!MethodFlushing) {
207 return;
208 }
209
210 // Increase time so that we can estimate when to invoke the sweeper again.
211 _time_counter++;
212
213 // Check for restart
214 assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
215 if (wait_for_stack_scanning()) {
216 _seen = 0;
217 _current = NMethodIterator();
218 // Initialize to first nmethod
219 _current.next();
220 _traversals += 1;
221 _total_time_this_sweep = Tickspan();
222
223 if (PrintMethodFlushing) {
224 tty->print_cr("### Sweep: stack traversal %ld", _traversals);
225 }
226 Threads::nmethods_do(&mark_activation_closure);
227
228 } else {
229 // Only set hotness counter
230 Threads::nmethods_do(&set_hotness_closure);
231 }
232
233 OrderAccess::storestore();
234 }
235
236 /**
237 * This function triggers a VM operation that does stack scanning of active
238 * methods. Stack scanning is mandatory for the sweeper to make progress.
239 */
240 void NMethodSweeper::do_stack_scanning() {
241 assert(!CodeCache_lock->owned_by_self(), "just checking");
242 if (wait_for_stack_scanning()) {
243 VM_MarkActiveNMethods op;
244 VMThread::execute(&op);
463 _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
464 _total_flushed_size += freed_memory;
465 _total_nof_methods_reclaimed += flushed_count;
466 _total_nof_c2_methods_reclaimed += flushed_c2_count;
467 _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
468 }
469 EventSweepCodeCache event(UNTIMED);
470 if (event.should_commit()) {
471 event.set_starttime(sweep_start_counter);
472 event.set_endtime(sweep_end_counter);
473 event.set_sweepIndex(_traversals);
474 event.set_sweptCount(swept_count);
475 event.set_flushedCount(flushed_count);
476 event.set_markedCount(marked_for_reclamation_count);
477 event.set_zombifiedCount(zombified_count);
478 event.commit();
479 }
480
481 #ifdef ASSERT
482 if(PrintMethodFlushing) {
483 tty->print_cr("### sweeper: sweep time(" INT64_FORMAT "): ", (jlong)sweep_time.value());
484 }
485 #endif
486
487 log_sweep("finished");
488
489 // Sweeper is the only case where memory is released, check here if it
490 // is time to restart the compiler. Only checking if there is a certain
491 // amount of free memory in the code cache might lead to re-enabling
492 // compilation although no memory has been released. For example, there are
493 // cases when compilation was disabled although there is 4MB (or more) free
494 // memory in the code cache. The reason is code cache fragmentation. Therefore,
495 // it only makes sense to re-enable compilation if we have actually freed memory.
496 // Note that typically several kB are released for sweeping 16MB of the code
497 // cache. As a result, 'freed_memory' > 0 to restart the compiler.
498 if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) {
499 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
500 log_sweep("restart_compiler");
501 }
502 }
503
573
574 // Skip methods that are currently referenced by the VM
575 if (nm->is_locked_by_vm()) {
576 // But still remember to clean-up inline caches for alive nmethods
577 if (nm->is_alive()) {
578 // Clean inline caches that point to zombie/non-entrant methods
579 MutexLocker cl(CompiledIC_lock);
580 nm->cleanup_inline_caches();
581 SWEEP(nm);
582 }
583 return result;
584 }
585
586 if (nm->is_zombie()) {
587 // If it is the first time we see nmethod then we mark it. Otherwise,
588 // we reclaim it. When we have seen a zombie method twice, we know that
589 // there are no inline caches that refer to it.
590 if (nm->is_marked_for_reclamation()) {
591 assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
592 if (PrintMethodFlushing && Verbose) {
593 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), p2i(nm));
594 }
595 release_nmethod(nm);
596 assert(result == None, "sanity");
597 result = Flushed;
598 } else {
599 if (PrintMethodFlushing && Verbose) {
600 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), p2i(nm));
601 }
602 nm->mark_for_reclamation();
603 // Keep track of code cache state change
604 _bytes_changed += nm->total_size();
605 SWEEP(nm);
606 assert(result == None, "sanity");
607 result = MarkedForReclamation;
608 }
609 } else if (nm->is_not_entrant()) {
610 // If there are no current activations of this method on the
611 // stack we can safely convert it to a zombie method
612 if (nm->can_convert_to_zombie()) {
613 // Clear ICStubs to prevent back patching stubs of zombie or unloaded
614 // nmethods during the next safepoint (see ICStub::finalize).
615 {
616 MutexLocker cl(CompiledIC_lock);
617 nm->clear_ic_stubs();
618 }
619 if (PrintMethodFlushing && Verbose) {
620 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), p2i(nm));
621 }
622 // Code cache state change is tracked in make_zombie()
623 nm->make_zombie();
624 SWEEP(nm);
625 assert(result == None, "sanity");
626 result = MadeZombie;
627 assert(nm->is_zombie(), "nmethod must be zombie");
628 } else {
629 // Still alive, clean up its inline caches
630 MutexLocker cl(CompiledIC_lock);
631 nm->cleanup_inline_caches();
632 SWEEP(nm);
633 }
634 } else if (nm->is_unloaded()) {
635 // Unloaded code, just make it a zombie
636 if (PrintMethodFlushing && Verbose) {
637 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), p2i(nm));
638 }
639 if (nm->is_osr_method()) {
640 SWEEP(nm);
641 // No inline caches will ever point to osr methods, so we can just remove it
642 release_nmethod(nm);
643 assert(result == None, "sanity");
644 result = Flushed;
645 } else {
646 {
647 // Clean ICs of unloaded nmethods as well because they may reference other
648 // unloaded nmethods that may be flushed earlier in the sweeper cycle.
649 MutexLocker cl(CompiledIC_lock);
650 nm->cleanup_inline_caches();
651 }
652 // Code cache state change is tracked in make_zombie()
653 nm->make_zombie();
654 SWEEP(nm);
655 assert(result == None, "sanity");
656 result = MadeZombie;
657 }
724 make_not_entrant = false;
725 } else if (MethodCounters::is_nmethod_age_unset(age)) {
726 // No counters were used before. Set the counters to the detection
727 // limit value. If the method is going to be used again it will be compiled
728 // with counters that we're going to use for analysis the the next time.
729 mc->reset_nmethod_age();
730 } else {
731 // Method was totally idle for 10 sweeps
732 // The counter already has the initial value, flush it and may be recompile
733 // later with counters
734 }
735 }
736 }
737
738 if (make_not_entrant) {
739 nm->make_not_entrant();
740
741 // Code cache state change is tracked in make_not_entrant()
742 if (PrintMethodFlushing && Verbose) {
743 tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f",
744 nm->compile_id(), p2i(nm), nm->hotness_counter(), reset_val, threshold);
745 }
746 }
747 }
748 }
749 }
750
751 // Print out some state information about the current sweep and the
752 // state of the code cache if it's requested.
753 void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
754 if (PrintMethodFlushing) {
755 ResourceMark rm;
756 stringStream s;
757 // Dump code cache state into a buffer before locking the tty,
758 // because log_state() will use locks causing lock conflicts.
759 CodeCache::log_state(&s);
760
761 ttyLocker ttyl;
762 tty->print("### sweeper: %s ", msg);
763 if (format != NULL) {
764 va_list ap;
|