< prev index next >

src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

Print this page




 462   _eden_chunk_capacity(0),     // -- ditto --
 463   _eden_chunk_index(0),        // -- ditto --
 464   _survivor_plab_array(NULL),  // -- ditto --
 465   _survivor_chunk_array(NULL), // -- ditto --
 466   _survivor_chunk_capacity(0), // -- ditto --
 467   _survivor_chunk_index(0),    // -- ditto --
 468   _ser_pmc_preclean_ovflw(0),
 469   _ser_kac_preclean_ovflw(0),
 470   _ser_pmc_remark_ovflw(0),
 471   _par_pmc_remark_ovflw(0),
 472   _ser_kac_ovflw(0),
 473   _par_kac_ovflw(0),
 474 #ifndef PRODUCT
 475   _num_par_pushes(0),
 476 #endif
 477   _collection_count_start(0),
 478   _verifying(false),
 479   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 480   _completed_initialization(false),
 481   _collector_policy(cp),
 482   _should_unload_classes(CMSClassUnloadingEnabled),
 483   _concurrent_cycles_since_last_unload(0),
 484   _roots_scanning_options(GenCollectedHeap::SO_None),
 485   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 486   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 487   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
 488   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 489   _cms_start_registered(false)
 490 {
 491   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 492     ExplicitGCInvokesConcurrent = true;
 493   }
 494   // Now expand the span and allocate the collection support structures
 495   // (MUT, marking bit map etc.) to cover both generations subject to
 496   // collection.
 497 
 498   // For use by dirty card to oop closures.
 499   _cmsGen->cmsSpace()->set_collector(this);
 500 
 501   // Allocate MUT and marking bit map
 502   {


2545 // unload classes if it's the case that:
2546 // (1) an explicit gc request has been made and the flag
2547 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
2548 // (2) (a) class unloading is enabled at the command line, and
2549 //     (b) old gen is getting really full
2550 // NOTE: Provided there is no change in the state of the heap between
2551 // calls to this method, it should have idempotent results. Moreover,
2552 // its results should be monotonically increasing (i.e. going from 0 to 1,
2553 // but not 1 to 0) between successive calls between which the heap was
2554 // not collected. For the implementation below, it must thus rely on
2555 // the property that concurrent_cycles_since_last_unload()
2556 // will not decrease unless a collection cycle happened and that
 2557 // _cmsGen->is_too_full() is
 2558 // itself also monotonic in that sense. See check_monotonicity()
 2559 // below.
2560 void CMSCollector::update_should_unload_classes() {
2561   _should_unload_classes = false;
2562   // Condition 1 above
2563   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2564     _should_unload_classes = true;
2565   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
2566     // Disjuncts 2.b.(i,ii,iii) above
2567     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2568                               CMSClassUnloadingMaxInterval)
2569                            || _cmsGen->is_too_full();
2570   }
2571 }
2572 
2573 bool ConcurrentMarkSweepGeneration::is_too_full() const {
2574   bool res = should_concurrent_collect();
2575   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2576   return res;
2577 }
2578 
2579 void CMSCollector::setup_cms_unloading_and_verification_state() {
2580   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2581                              || VerifyBeforeExit;
2582   const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
2583 
2584   // We set the proper root for this CMS cycle here.
2585   if (should_unload_classes()) {   // Should unload classes this cycle




 462   _eden_chunk_capacity(0),     // -- ditto --
 463   _eden_chunk_index(0),        // -- ditto --
 464   _survivor_plab_array(NULL),  // -- ditto --
 465   _survivor_chunk_array(NULL), // -- ditto --
 466   _survivor_chunk_capacity(0), // -- ditto --
 467   _survivor_chunk_index(0),    // -- ditto --
 468   _ser_pmc_preclean_ovflw(0),
 469   _ser_kac_preclean_ovflw(0),
 470   _ser_pmc_remark_ovflw(0),
 471   _par_pmc_remark_ovflw(0),
 472   _ser_kac_ovflw(0),
 473   _par_kac_ovflw(0),
 474 #ifndef PRODUCT
 475   _num_par_pushes(0),
 476 #endif
 477   _collection_count_start(0),
 478   _verifying(false),
 479   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 480   _completed_initialization(false),
 481   _collector_policy(cp),
 482   _should_unload_classes(ClassUnloadingWithConcurrentMark),
 483   _concurrent_cycles_since_last_unload(0),
 484   _roots_scanning_options(GenCollectedHeap::SO_None),
 485   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 486   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 487   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
 488   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 489   _cms_start_registered(false)
 490 {
 491   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 492     ExplicitGCInvokesConcurrent = true;
 493   }
 494   // Now expand the span and allocate the collection support structures
 495   // (MUT, marking bit map etc.) to cover both generations subject to
 496   // collection.
 497 
 498   // For use by dirty card to oop closures.
 499   _cmsGen->cmsSpace()->set_collector(this);
 500 
 501   // Allocate MUT and marking bit map
 502   {


2545 // unload classes if it's the case that:
2546 // (1) an explicit gc request has been made and the flag
2547 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
2548 // (2) (a) class unloading is enabled at the command line, and
2549 //     (b) old gen is getting really full
2550 // NOTE: Provided there is no change in the state of the heap between
2551 // calls to this method, it should have idempotent results. Moreover,
2552 // its results should be monotonically increasing (i.e. going from 0 to 1,
2553 // but not 1 to 0) between successive calls between which the heap was
2554 // not collected. For the implementation below, it must thus rely on
2555 // the property that concurrent_cycles_since_last_unload()
2556 // will not decrease unless a collection cycle happened and that
 2557 // _cmsGen->is_too_full() is
 2558 // itself also monotonic in that sense. See check_monotonicity()
 2559 // below.
2560 void CMSCollector::update_should_unload_classes() {
2561   _should_unload_classes = false;
2562   // Condition 1 above
2563   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2564     _should_unload_classes = true;
2565   } else if (ClassUnloadingWithConcurrentMark) { // Condition 2.a above
2566     // Disjuncts 2.b.(i,ii,iii) above
2567     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2568                               CMSClassUnloadingMaxInterval)
2569                            || _cmsGen->is_too_full();
2570   }
2571 }
2572 
2573 bool ConcurrentMarkSweepGeneration::is_too_full() const {
2574   bool res = should_concurrent_collect();
2575   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2576   return res;
2577 }
2578 
2579 void CMSCollector::setup_cms_unloading_and_verification_state() {
2580   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2581                              || VerifyBeforeExit;
2582   const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
2583 
2584   // We set the proper root for this CMS cycle here.
2585   if (should_unload_classes()) {   // Should unload classes this cycle


< prev index next >