27
// Singleton: the currently active full GC scope, or NULL when no full GC is
// in progress. Published by the constructor and cleared by the destructor.
G1FullGCScope* G1FullGCScope::_instance = NULL;
29
30 G1FullGCScope* G1FullGCScope::instance() {
31 assert(_instance != NULL, "Must be setup already");
32 return _instance;
33 }
34
// Sets up the full GC scope: records the GC start with the STW timer and the
// tracer, performs the pre-GC heap dump and the before-GC heap trace, then
// publishes this object as the process-wide singleton (only one scope may be
// active at a time).
// NOTE(review): members are initialized in their declaration order, so this
// relies on _g1h being declared before _soft_refs/_memory_stats/
// _collector_stats, which dereference it — assumed to match the header.
G1FullGCScope::G1FullGCScope(bool explicit_gc, bool clear_soft) :
    _rm(),
    _explicit_gc(explicit_gc),
    _g1h(G1CollectedHeap::heap()),
    _gc_id(),
    _svc_marker(SvcGCMarker::FULL),
    _timer(),
    _tracer(),
    _active(),
    _cpu_time(),
    _soft_refs(clear_soft, _g1h->collector_policy()),
    _memory_stats(true, _g1h->gc_cause()),
    _collector_stats(_g1h->g1mm()->full_collection_counters()) {
  assert(_instance == NULL, "Only one scope at a time");
  // Start timing before tracing so the tracer can report the start timestamp.
  _timer.register_gc_start();
  _tracer.report_gc_start(_g1h->gc_cause(), _timer.gc_start());
  _g1h->pre_full_gc_dump(&_timer);
  _g1h->trace_heap_before_gc(&_tracer);
  // Publish last, once the scope is fully constructed.
  _instance = this;
}
55
// Tears down the scope: publishes updated heap sizes, traces/dumps the
// post-GC heap, records the GC end with the timer and tracer, and finally
// clears the singleton so a new scope may be created.
G1FullGCScope::~G1FullGCScope() {
  // We must call G1MonitoringSupport::update_sizes() in the same scoping level
  // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
  // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
  // before any GC notifications are raised.
  _g1h->g1mm()->update_sizes();
  _g1h->trace_heap_after_gc(&_tracer);
  _g1h->post_full_gc_dump(&_timer);
  // End timing before reporting so the tracer sees the final timestamp.
  _timer.register_gc_end();
  _tracer.report_gc_end(_timer.gc_end(), _timer.time_partitions());
  _instance = NULL;
}
68
69 bool G1FullGCScope::is_explicit_gc() {
70 return _explicit_gc;
71 }
72
73 bool G1FullGCScope::should_clear_soft_refs() {
74 return _soft_refs.should_clear();
75 }
76
77 STWGCTimer* G1FullGCScope::timer() {
78 return &_timer;
79 }
80
81 SerialOldTracer* G1FullGCScope::tracer() {
82 return &_tracer;
83 }
|
27
// Singleton: the currently active full GC scope, or NULL when no full GC is
// in progress. Published by the constructor and cleared by the destructor.
G1FullGCScope* G1FullGCScope::_instance = NULL;
29
30 G1FullGCScope* G1FullGCScope::instance() {
31 assert(_instance != NULL, "Must be setup already");
32 return _instance;
33 }
34
// Sets up the full GC scope: records the GC start with the STW timer and the
// tracer, performs the pre-GC heap dump and the before-GC heap trace, then
// publishes this object as the process-wide singleton (only one scope may be
// active at a time).
// NOTE(review): members are initialized in their declaration order, so this
// relies on _g1h being declared before the members that dereference it
// (_soft_refs, _memory_stats, _collector_stats, _heap_transition) — assumed
// to match the header.
G1FullGCScope::G1FullGCScope(bool explicit_gc, bool clear_soft) :
    _rm(),
    _explicit_gc(explicit_gc),
    _g1h(G1CollectedHeap::heap()),
    _gc_id(),
    _svc_marker(SvcGCMarker::FULL),
    _timer(),
    _tracer(),
    _active(),
    _cpu_time(),
    _soft_refs(clear_soft, _g1h->collector_policy()),
    _memory_stats(true, _g1h->gc_cause()),
    _collector_stats(_g1h->g1mm()->full_collection_counters()),
    _heap_transition(_g1h) {
  assert(_instance == NULL, "Only one scope at a time");
  // Start timing before tracing so the tracer can report the start timestamp.
  _timer.register_gc_start();
  _tracer.report_gc_start(_g1h->gc_cause(), _timer.gc_start());
  _g1h->pre_full_gc_dump(&_timer);
  _g1h->trace_heap_before_gc(&_tracer);
  // Publish last, once the scope is fully constructed.
  _instance = this;
}
56
// Tears down the scope: publishes updated heap sizes, traces/dumps the
// post-GC heap, records the GC end with the timer and tracer, and finally
// clears the singleton so a new scope may be created.
G1FullGCScope::~G1FullGCScope() {
  // We must call G1MonitoringSupport::update_sizes() in the same scoping level
  // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
  // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
  // before any GC notifications are raised.
  _g1h->g1mm()->update_sizes();
  _g1h->trace_heap_after_gc(&_tracer);
  _g1h->post_full_gc_dump(&_timer);
  // End timing before reporting so the tracer sees the final timestamp.
  _timer.register_gc_end();
  _tracer.report_gc_end(_timer.gc_end(), _timer.time_partitions());
  _instance = NULL;
}
69
70 bool G1FullGCScope::is_explicit_gc() {
71 return _explicit_gc;
72 }
73
74 bool G1FullGCScope::should_clear_soft_refs() {
75 return _soft_refs.should_clear();
76 }
77
78 STWGCTimer* G1FullGCScope::timer() {
79 return &_timer;
80 }
81
82 SerialOldTracer* G1FullGCScope::tracer() {
83 return &_tracer;
84 }
85
86 G1HeapTransition* G1FullGCScope::heap_transition() {
87 return &_heap_transition;
88 }
|