src/share/vm/gc_implementation/shared/vmGCOperations.cpp
rev 7215 : imported patch remove_levels
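The one functional change in the hunk shown here is in VM_GenCollectFull::doit(): the argument passed to gch->do_full_collection() is renamed from _max_level to _max_generation, matching the patch name (remove_levels). The old version of the file is shown first, followed by the patched version.

For orientation, the rename presumably carries through the class declaration in vmGCOperations.hpp. The following is a rough, hypothetical sketch only, not part of this webrev; everything beyond the _max_generation name (the parameter's type, the constructor's arguments) is an assumption:

    // Hypothetical sketch -- the real declaration in vmGCOperations.hpp
    // may differ (e.g. in the parameter's type or constructor arguments).
    class VM_GenCollectFull : public VM_GC_Operation {
     private:
      int _max_generation;   // previously _max_level
     public:
      VM_GenCollectFull(uint gc_count_before, uint full_gc_count_before,
                        GCCause::Cause gc_cause, int max_generation)
        : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before,
                          true /* full */),
          _max_generation(max_generation) {}
      virtual VMOp_Type type() const { return VMOp_GenCollectFull; }
      virtual void doit();   // calls do_full_collection(..., _max_generation)
    };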


Old:
 175 
 176 
 177 void VM_GenCollectForAllocation::doit() {
 178   SvcGCMarker sgcm(SvcGCMarker::MINOR);
 179 
 180   GenCollectedHeap* gch = GenCollectedHeap::heap();
 181   GCCauseSetter gccs(gch, _gc_cause);
 182   _res = gch->satisfy_failed_allocation(_size, _tlab);
 183   assert(gch->is_in_reserved_or_null(_res), "result not in heap");
 184 
 185   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
 186     set_gc_locked();
 187   }
 188 }
 189 
 190 void VM_GenCollectFull::doit() {
 191   SvcGCMarker sgcm(SvcGCMarker::FULL);
 192 
 193   GenCollectedHeap* gch = GenCollectedHeap::heap();
 194   GCCauseSetter gccs(gch, _gc_cause);
 195   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
 196 }
 197 
 198 // Returns true iff concurrent GCs unload metadata.
 199 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
 200 #if INCLUDE_ALL_GCS
 201   if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
 202     MetaspaceGC::set_should_concurrent_collect(true);
 203     return true;
 204   }
 205 
 206   if (UseG1GC && ClassUnloadingWithConcurrentMark) {
 207     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 208     g1h->g1_policy()->set_initiate_conc_mark_if_possible();
 209 
 210     GCCauseSetter x(g1h, _gc_cause);
 211 
 212     // At this point we are supposed to start a concurrent cycle. We
 213     // will do so if one is not already in progress.
 214     bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
 215 

New:
 175 
 176 
 177 void VM_GenCollectForAllocation::doit() {
 178   SvcGCMarker sgcm(SvcGCMarker::MINOR);
 179 
 180   GenCollectedHeap* gch = GenCollectedHeap::heap();
 181   GCCauseSetter gccs(gch, _gc_cause);
 182   _res = gch->satisfy_failed_allocation(_size, _tlab);
 183   assert(gch->is_in_reserved_or_null(_res), "result not in heap");
 184 
 185   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
 186     set_gc_locked();
 187   }
 188 }
 189 
 190 void VM_GenCollectFull::doit() {
 191   SvcGCMarker sgcm(SvcGCMarker::FULL);
 192 
 193   GenCollectedHeap* gch = GenCollectedHeap::heap();
 194   GCCauseSetter gccs(gch, _gc_cause);
 195   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
 196 }
 197 
 198 // Returns true iff concurrent GCs unload metadata.
 199 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
 200 #if INCLUDE_ALL_GCS
 201   if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
 202     MetaspaceGC::set_should_concurrent_collect(true);
 203     return true;
 204   }
 205 
 206   if (UseG1GC && ClassUnloadingWithConcurrentMark) {
 207     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 208     g1h->g1_policy()->set_initiate_conc_mark_if_possible();
 209 
 210     GCCauseSetter x(g1h, _gc_cause);
 211 
 212     // At this point we are supposed to start a concurrent cycle. We
 213     // will do so if one is not already in progress.
 214     bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
 215 
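
For context on how this helper is used: initiate_concurrent_GC() lets the metadata-allocation VM operation hand class unloading off to a concurrent cycle (CMS or G1, per the checks above) instead of forcing an immediate stop-the-world full collection. A simplified, hypothetical caller-side sketch follows; the body of VM_CollectForMetadataAllocation::doit() is paraphrased, not taken from this webrev, and _loader_data, _size, _mdtype, and the expand_and_allocate() call are assumptions:

    // Hypothetical, simplified caller sketch.
    void VM_CollectForMetadataAllocation::doit() {
      SvcGCMarker sgcm(SvcGCMarker::FULL);

      CollectedHeap* heap = Universe::heap();
      GCCauseSetter gccs(heap, _gc_cause);

      if (initiate_concurrent_GC()) {
        // A concurrent cycle that can unload classes has been requested;
        // expand the metaspace now and let the concurrent collection
        // reclaim space later.
        _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
        if (_result != NULL) {
          return;
        }
      }

      // Otherwise fall back to a stop-the-world collection that unloads
      // metadata, then retry the allocation (elided).
    }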

