src/share/vm/gc_implementation/shared/vmGCOperations.cpp

Excerpt (lines 178-225), shown before and after the change that adds the G1ClassUnloadingEnabled check.




 178   SvcGCMarker sgcm(SvcGCMarker::MINOR);
 179 
 180   GenCollectedHeap* gch = GenCollectedHeap::heap();
 181   GCCauseSetter gccs(gch, _gc_cause);
 182   _res = gch->satisfy_failed_allocation(_size, _tlab);
 183   assert(gch->is_in_reserved_or_null(_res), "result not in heap");
 184 
 185   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
 186     set_gc_locked();
 187   }
 188 }
 189 
 190 void VM_GenCollectFull::doit() {
 191   SvcGCMarker sgcm(SvcGCMarker::FULL);
 192 
 193   GenCollectedHeap* gch = GenCollectedHeap::heap();
 194   GCCauseSetter gccs(gch, _gc_cause);
 195   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
 196 }
 197 

 198 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
 199 #if INCLUDE_ALL_GCS
 200   if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
 201     MetaspaceGC::set_should_concurrent_collect(true);
 202     return true;
 203   }
 204 
 205   if (UseG1GC) {
 206     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 207     g1h->g1_policy()->set_initiate_conc_mark_if_possible();
 208 
 209     GCCauseSetter x(g1h, _gc_cause);
 210 
 211     // At this point we are supposed to start a concurrent cycle. We
 212     // will do so if one is not already in progress.
 213     bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
 214 
 215     if (should_start) {
 216       double pause_target = g1h->g1_policy()->max_pause_time_ms();
 217       g1h->do_collection_pause_at_safepoint(pause_target);
 218     }
 219     return true;
 220   }
 221 #endif
 222 
 223   return false;
 224 }
 225 




 178   SvcGCMarker sgcm(SvcGCMarker::MINOR);
 179 
 180   GenCollectedHeap* gch = GenCollectedHeap::heap();
 181   GCCauseSetter gccs(gch, _gc_cause);
 182   _res = gch->satisfy_failed_allocation(_size, _tlab);
 183   assert(gch->is_in_reserved_or_null(_res), "result not in heap");
 184 
 185   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
 186     set_gc_locked();
 187   }
 188 }
 189 
 190 void VM_GenCollectFull::doit() {
 191   SvcGCMarker sgcm(SvcGCMarker::FULL);
 192 
 193   GenCollectedHeap* gch = GenCollectedHeap::heap();
 194   GCCauseSetter gccs(gch, _gc_cause);
 195   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
 196 }
 197 
 198 // Returns true iff concurrent GCs unloads metadata.
 199 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
 200 #if INCLUDE_ALL_GCS
 201   if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
 202     MetaspaceGC::set_should_concurrent_collect(true);
 203     return true;
 204   }
 205 
 206   if (UseG1GC && G1ClassUnloadingEnabled) {
 207     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 208     g1h->g1_policy()->set_initiate_conc_mark_if_possible();
 209 
 210     GCCauseSetter x(g1h, _gc_cause);
 211 
 212     // At this point we are supposed to start a concurrent cycle. We
 213     // will do so if one is not already in progress.
 214     bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
 215 
 216     if (should_start) {
 217       double pause_target = g1h->g1_policy()->max_pause_time_ms();
 218       g1h->do_collection_pause_at_safepoint(pause_target);
 219     }
 220     return true;
 221   }
 222 #endif
 223 
 224   return false;
 225 }
 226