#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#endif // INCLUDE_ALL_GCS

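// The destructor resets the soft-reference clearing decision: the
// all_soft_refs_clear flag describes only the collection that just ran,
// so it must not carry over into the policy for the next one.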
VM_GC_Operation::~VM_GC_Operation() {
  CollectedHeap* ch = Universe::heap();
  ch->collector_policy()->set_all_soft_refs_clear(false);
}

// The same dtrace probe can't be inserted in two different files, so we
// have to call it here, so it's only in one file. Can't create new probes
// for the other file anymore. The dtrace probes have to remain stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
  HOTSPOT_GC_BEGIN(
                   full);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}

void VM_GC_Operation::notify_gc_end() {
  HOTSPOT_GC_END();
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}

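// These wrap _pending_list_locker, which holds the java.lang.ref.Reference
// pending-list lock across the collection so the reference handler thread
// cannot observe the pending list while the GC is updating it.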
void VM_GC_Operation::acquire_pending_list_lock() {
  _pending_list_locker.lock();
}

void VM_GC_Operation::release_and_notify_pending_list_lock() {
  _pending_list_locker.unlock();
}

// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests. We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signaled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GCLocker::is_active_and_needs_gc()) {
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GCLocker cannot be active when initiating GC");
  }
  return skip;
}

bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  // To be able to handle a GC the VM initialization needs to be completed.
  if (!is_init_completed()) {
    vm_exit_during_initialization(
      err_msg("GC triggered before VM initialization completed. Try increasing "
              "NewSize, current value " SIZE_FORMAT "%s.",
              byte_size_in_proper_unit(NewSize),
              proper_unit_for_byte_size(NewSize)));
  }

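  // ... (elided in this excerpt: judging from the surviving tail and from
  // doit_epilogue() below, the prologue acquires the pending-list lock and
  // the Heap_lock, then sets _prologue_succeeded from the outcome of
  // skip_operation(), releasing both locks on the skip path) ...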
  }
  return _prologue_succeeded;
}


void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}

bool VM_GC_HeapInspection::skip_operation() const {
  return false;
}

bool VM_GC_HeapInspection::collect() {
  if (GCLocker::is_active()) {
    return false;
  }
  Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  return true;
}

void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  Universe::heap()->ensure_parsability(false); // must happen, even if collection does
                                               // not happen (e.g. due to GCLocker)
                                               // or _full_gc being false
  if (_full_gc) {
    if (!collect()) {
      // The collection attempt was skipped because the gc locker is held.
      // The following dump may then be a tad misleading to someone expecting
      // only live objects to show up in the dump (see CR 6944195). Just issue
      // a suitable warning in that case and do not attempt to do a collection.
      // The latter is a subtle point, because even a failed attempt
      // to GC will, in fact, induce one in the future, which we
      // probably want to avoid in this case because the GC that we may
      // be about to attempt holds value for us only
      // if it happens now and not if it happens in the eventual
      // future.
      log_warning(gc)("GC locker is held; pre-dump GC was skipped");
    }
  }
  HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
                         _columns);
  inspect.heap_inspection(_out);
}
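
// ... (intervening code elided in this excerpt; the fragment below appears
// to be the tail of VM_CollectForMetadataAllocation::initiate_concurrent_GC(),
// inside its G1 branch under INCLUDE_ALL_GCS, where g1h names the
// G1CollectedHeap) ...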
    GCCauseSetter x(g1h, _gc_cause);

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    if (should_start) {
      double pause_target = g1h->g1_policy()->max_pause_time_ms();
      g1h->do_collection_pause_at_safepoint(pause_target);
    }
    return true;
  }
#endif

  return false;
}

void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);
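  // The stack-allocated SvcGCMarker (declared in vmGCOperations.hpp)
  // brackets the operation: its constructor fires notify_gc_begin(full)
  // and the JVMTI GC-start event, and its destructor fires the matching
  // end notifications.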

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available. Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }
  }

  if (initiate_concurrent_GC()) {
    // For CMS and G1, expand since the collection is going to be concurrent.
    _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    if (_result != NULL) {
      return;
    }

    log_debug(gc)("%s full GC for Metaspace", UseConcMarkSweepGC ? "CMS" : "G1");