      // The latter is a subtle point: even a failed attempt to GC will,
      // in fact, induce one in the future, which we probably want to
      // avoid in this case, because the GC we may be about to attempt
      // has value for us only if it happens now, not at some point in
      // the eventual future.
      warning("GC locker is held; pre-dump GC was skipped");
    }
  }
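  // Produce the heap inspection (class histogram) and write it to the
  // requested output stream, whether or not the pre-dump GC actually ran.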
  HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
                         _columns);
  inspect.heap_inspection(_out);
}


void VM_GenCollectForAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
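  // Ask the generational heap to satisfy the failed allocation, typically by
  // collecting and, if necessary, expanding the heap before retrying the
  // allocation.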
  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
  assert(gch->is_in_reserved_or_null(_result), "result not in heap");

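  // If the allocation still failed because the GC locker prevented a
  // collection, record that so the requesting thread can retry once the
  // locker is released.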
  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
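  // Collect all generations up to _max_level, clearing soft references if the
  // heap says they must be cleared.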
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
}

// Returns true iff a concurrent GC that will unload metadata has been initiated.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
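    // Class unloading is enabled for CMS: request a concurrent cycle that
    // will also collect the metaspace.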
    MetaspaceGC::set_should_concurrent_collect(true);
    return true;
  }

// ...

  // If expansion failed, do a last-ditch collection and try allocating
  // again. A last-ditch collection will clear softrefs. This
  // behavior is similar to the last-ditch collection done for perm
  // gen when it was full and a collection for failed allocation
  // did not free perm gen space.
  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
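  // Retry the metaspace allocation now that the last-ditch collection has run.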
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
                           SIZE_FORMAT, _size);
  }

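  // Allocation failed even after the last-ditch collection. If the GC locker
  // is what prevented a collection, mark this operation so the request is
  // retried once the critical section ends.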
  if (GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause) :
    VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size) {
  // G1's incremental collections are not always caused by an allocation, which is indicated by word_size = 0.
  assert(_word_size != 0 || UseG1GC, "word_size = 0 should only happen with G1");
}
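
// A minimal usage sketch (not part of this file), showing how an allocation
// path typically drives one of these operations. The constructor arguments
// and the retry handling shown here are illustrative assumptions, not code
// taken from this file:
//
//   VM_GenCollectForAllocation op(word_size, false /* tlab */, gc_count_before);
//   VMThread::execute(&op);
//   if (op.gc_locked()) {
//     // The GC locker blocked the collection; stall until the last JNI
//     // critical section exits, then retry the allocation.
//   }
//   HeapWord* result = op.result();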