< prev index next >

src/share/vm/gc/shared/vmGCOperations.cpp

Print this page
rev 8393 : 8077842: Remove the level parameter passed around in GenCollectedHeap
Reviewed-by:


 167 
 168 
 // VM operation: try to satisfy a failed allocation of _word_size words
 // (optionally for a TLAB) by collecting; stores the result in _result.
 169 void VM_GenCollectForAllocation::doit() {
 170   SvcGCMarker sgcm(SvcGCMarker::MINOR);  // serviceability marker for a minor collection (RAII scope)
 171 
 172   GenCollectedHeap* gch = GenCollectedHeap::heap();
 173   GCCauseSetter gccs(gch, _gc_cause);  // publish _gc_cause on the heap while this operation runs
 174   _result = gch->satisfy_failed_allocation(_word_size, _tlab);
 175   assert(gch->is_in_reserved_or_null(_result), "result not in heap");
 176 
 177   // Allocation still failed while the GC locker was active and a GC is
 178   // pending: flag it so the requesting thread can retry after the
 179   // locker-induced GC happens.
 177   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
 178     set_gc_locked();
 179   }
 180 }
 181 
 // VM operation: perform a full collection of the generations up to and
 // including _max_level.
 182 void VM_GenCollectFull::doit() {
 183   SvcGCMarker sgcm(SvcGCMarker::FULL);  // serviceability marker for a full collection (RAII scope)
 184 
 185   GenCollectedHeap* gch = GenCollectedHeap::heap();
 186   GCCauseSetter gccs(gch, _gc_cause);  // publish _gc_cause on the heap while this operation runs
 187   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
 188 }
 189 
 190 // Returns true iff concurrent GCs unload metadata.
 191 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
 192 #if INCLUDE_ALL_GCS
 193   if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
 194     MetaspaceGC::set_should_concurrent_collect(true);
 195     return true;
 196   }
 197 
 198   if (UseG1GC && ClassUnloadingWithConcurrentMark) {
 199     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 200     g1h->g1_policy()->set_initiate_conc_mark_if_possible();
 201 
 202     GCCauseSetter x(g1h, _gc_cause);
 203 
 204     // At this point we are supposed to start a concurrent cycle. We
 205     // will do so if one is not already in progress.
 206     bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
 207 




 167 
 168 
 // VM operation: try to satisfy a failed allocation of _word_size words
 // (optionally for a TLAB) by collecting; stores the result in _result.
 169 void VM_GenCollectForAllocation::doit() {
 170   SvcGCMarker sgcm(SvcGCMarker::MINOR);  // serviceability marker for a minor collection (RAII scope)
 171 
 172   GenCollectedHeap* gch = GenCollectedHeap::heap();
 173   GCCauseSetter gccs(gch, _gc_cause);  // publish _gc_cause on the heap while this operation runs
 174   _result = gch->satisfy_failed_allocation(_word_size, _tlab);
 175   assert(gch->is_in_reserved_or_null(_result), "result not in heap");
 176 
 177   // Allocation still failed while the GC locker was active and a GC is
 178   // pending: flag it so the requesting thread can retry after the
 179   // locker-induced GC happens.
 177   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
 178     set_gc_locked();
 179   }
 180 }
 181 
 // VM operation: perform a full collection of the generations up to and
 // including _max_generation.
 182 void VM_GenCollectFull::doit() {
 183   SvcGCMarker sgcm(SvcGCMarker::FULL);  // serviceability marker for a full collection (RAII scope)
 184 
 185   GenCollectedHeap* gch = GenCollectedHeap::heap();
 186   GCCauseSetter gccs(gch, _gc_cause);  // publish _gc_cause on the heap while this operation runs
 187   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
 188 }
 189 
 190 // Returns true iff concurrent GCs unload metadata.
 191 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
 192 #if INCLUDE_ALL_GCS
 193   if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
 194     MetaspaceGC::set_should_concurrent_collect(true);
 195     return true;
 196   }
 197 
 198   if (UseG1GC && ClassUnloadingWithConcurrentMark) {
 199     G1CollectedHeap* g1h = G1CollectedHeap::heap();
 200     g1h->g1_policy()->set_initiate_conc_mark_if_possible();
 201 
 202     GCCauseSetter x(g1h, _gc_cause);
 203 
 204     // At this point we are supposed to start a concurrent cycle. We
 205     // will do so if one is not already in progress.
 206     bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
 207 


< prev index next >