void VM_GC_Operation::acquire_pending_list_lock() {
  // we may enter this with pending exception set
  InstanceRefKlass::acquire_pending_list_lock(&_pending_list_basic_lock);
}

void VM_GC_Operation::release_and_notify_pending_list_lock() {
  InstanceRefKlass::release_and_notify_pending_list_lock(&_pending_list_basic_lock);
}

// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests. We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signaled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GC_locker::is_active_and_needs_gc()) {
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GC_locker cannot be active when initiating GC");
  }
  return skip;
}

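// Illustrative caller pattern (a sketch, not code from this file; `word_size`
// and `is_tlab` are placeholder names): each requesting thread samples the
// collection count under the Heap_lock before queueing its operation, so
// skip_operation() above can detect that another thread's request has
// already performed the collection.
//
//   uint gc_count_before;
//   {
//     MutexLocker ml(Heap_lock);
//     gc_count_before = Universe::heap()->total_collections();
//   }
//   VM_GenCollectForAllocation op(word_size, is_tlab, gc_count_before);
//   VMThread::execute(&op);  // doit_prologue() runs skip_operation()
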
bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  assert(((_gc_cause != GCCause::_no_gc) &&
          (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");

  // To be able to handle a GC the VM initialization needs to be completed.
  if (!is_init_completed()) {
    vm_exit_during_initialization(
      err_msg("GC triggered before VM initialization completed. Try increasing "
              "NewSize, current value " SIZE_FORMAT "%s.",
              byte_size_in_proper_unit(NewSize),
              proper_unit_for_byte_size(NewSize)));
  }

  acquire_pending_list_lock();
  // If the GC count has changed someone beat us to the collection.
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    release_and_notify_pending_list_lock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}

void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}

bool VM_GC_HeapInspection::skip_operation() const {
  return false;
}

bool VM_GC_HeapInspection::collect() {
  if (GC_locker::is_active()) {
    return false;
  }
  Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  return true;
}

void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  Universe::heap()->ensure_parsability(false); // must happen, even if collection does
                                               // not happen (e.g. due to GC_locker)
                                               // or _full_gc being false
  if (_full_gc) {
    if (!collect()) {
      // The collection attempt was skipped because the GC locker is held.
      // The following dump may then be a tad misleading to someone expecting
      // only live objects to show up in the dump (see CR 6944195). Just issue
      // a suitable warning in that case and do not attempt a collection.
      // The latter is a subtle point: even a failed attempt to GC will,
      // in fact, induce one in the future, which we probably want to
      // avoid here, because this GC holds value for us only if it
      // happens now, not at some point in the eventual future.
      warning("GC locker is held; pre-dump GC was skipped");
    }
  }
  HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
                         _columns);
  inspect.heap_inspection(_out);
}
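
// Illustrative trigger (a sketch; the `out` stream and the boolean flag are
// placeholders, modeled on how the class-histogram diagnostic command drives
// this operation): request a histogram preceded by a full GC so the dump
// shows only live objects.
//
//   VM_GC_HeapInspection heapop(out, true /* request full gc */);
//   VMThread::execute(&heapop);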

void VM_GenCollectForAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
  assert(gch->is_in_reserved_or_null(_result), "result not in heap");

  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}
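
// Illustrative allocation slow path (a sketch modeled on the collector's
// mem_allocate retry loop; not code from this file): if the operation ran
// but produced no memory because a JNI critical section held the GC locker,
// stall until the locker clears and retry instead of throwing OOME.
//
//   VM_GenCollectForAllocation op(word_size, is_tlab, gc_count_before);
//   VMThread::execute(&op);
//   if (op.prologue_succeeded()) {
//     HeapWord* result = op.result();
//     if (result == NULL && op.gc_locked()) {
//       GC_locker::stall_until_clear();  // then loop and retry the allocation
//     }
//   }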

void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
}
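
// Illustrative trigger (a sketch modeled on GenCollectedHeap::collect(), the
// path taken for e.g. System.gc(); `cause` and `max_generation` are
// placeholders and the unlock/relock detail is hedged): sample both counters
// under the Heap_lock, then hand the request to the VM thread.
//
//   MutexLocker ml(Heap_lock);
//   uint gc_count_before      = Universe::heap()->total_collections();
//   uint full_gc_count_before = Universe::heap()->total_full_collections();
//   {
//     MutexUnlocker mu(Heap_lock);  // doit_prologue() retakes Heap_lock
//     VM_GenCollectFull op(gc_count_before, full_gc_count_before,
//                          cause, max_generation);
//     VMThread::execute(&op);
//   }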

VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                                                 size_t size,
                                                                 Metaspace::MetadataType mdtype,
                                                                 uint gc_count_before,
                                                                 uint full_gc_count_before,
                                                                 GCCause::Cause gc_cause)
    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
      _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
  // ... (constructor body elided in this excerpt) ...
}

void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // ... (initial allocation retry and first collection attempt elided) ...

  // This should work unless there really is no more space
  // or a MaxMetaspaceSize has been specified on the command line.
  _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  // If expansion failed, do a last-ditch collection and try allocating
  // again. A last-ditch collection will clear softrefs. This
  // behavior is similar to the last-ditch collection done for perm
  // gen when it was full and a collection for failed allocation
  // did not free perm gen space.
  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  log_debug(gc)("After Metaspace GC failed to allocate size " SIZE_FORMAT, _size);

  if (GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}
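
// Illustrative caller (a sketch modeled on the Metaspace allocation slow
// path; not code from this file — `loader_data`, `word_size`, and the count
// variables are placeholders): queue this operation on allocation failure
// and take its result, retrying while the GC locker blocks collection.
//
//   VM_CollectForMetadataAllocation op(loader_data, word_size, mdtype,
//                                      gc_count, full_gc_count,
//                                      GCCause::_metadata_GC_threshold);
//   VMThread::execute(&op);
//   MetaWord* result = op.result();
//   if (result == NULL && op.gc_locked()) {
//     // stall until the GC locker clears, then retry the allocation
//   }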

VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
    : VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size) {
  // Only report if operation was really caused by an allocation.
  if (_word_size != 0) {
    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
  }
}