  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _result = gch->satisfy_failed_allocation(_word_size, _tlab);
  assert(gch->is_in_reserved_or_null(_result), "result not in heap");

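  // If the allocation still failed because the GC_locker is blocking the
  // collection, record that so the requesting thread can stall and retry
  // once the critical section is left.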
  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

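// Perform a stop-the-world full collection of the generational heap, up to
// and including the generation at _max_level.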
void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
}

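// A collection requested because a metaspace allocation for the given class
// loader failed. The constructor reports the pending allocation request to
// the event tracing framework before the operation is enqueued.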
VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                                                 size_t size,
                                                                 Metaspace::MetadataType mdtype,
                                                                 uint gc_count_before,
                                                                 uint full_gc_count_before,
                                                                 GCCause::Cause gc_cause)
    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
      _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
  assert(_size != 0, "An allocation should always be requested with this operation.");
  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
}

// Returns true iff concurrent GCs unload metadata.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
#if INCLUDE_ALL_GCS
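  // With CMS, request a concurrent collection on behalf of metaspace;
  // CMSClassUnloadingEnabled means that cycle will also unload classes.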
  if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
    MetaspaceGC::set_should_concurrent_collect(true);
    return true;
  }

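  // With G1, classes are unloaded at the end of a concurrent mark cycle, so
  // arrange for an initial-mark pause if no cycle is already in progress.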
  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->g1_policy()->set_initiate_conc_mark_if_possible();

    GCCauseSetter x(g1h, _gc_cause);

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    if (should_start) {
      double pause_target = g1h->g1_policy()->max_pause_time_ms();
      // ... (elided; the listing resumes inside VM_CollectForMetadataAllocation::doit())

  }

  // If expansion failed, do a last-ditch collection and try allocating
  // again. A last-ditch collection will clear soft references. This
  // mirrors the last-ditch collection that was done for the permanent
  // generation when it was full and a collection for a failed allocation
  // did not free any perm gen space.
  heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
  _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  if (_result != NULL) {
    return;
  }

  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
                           SIZE_FORMAT, _size);
  }

  if (GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}

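// Common base operation for collections requested to satisfy a Java heap
// allocation; reports the pending allocation to event tracing when present.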
VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
    : VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size) {
  // Only report if the operation was really caused by an allocation.
  if (_word_size != 0) {
    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
  }
}