src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

 462   _eden_chunk_capacity(0),     // -- ditto --
 463   _eden_chunk_index(0),        // -- ditto --
 464   _survivor_plab_array(NULL),  // -- ditto --
 465   _survivor_chunk_array(NULL), // -- ditto --
 466   _survivor_chunk_capacity(0), // -- ditto --
 467   _survivor_chunk_index(0),    // -- ditto --
 468   _ser_pmc_preclean_ovflw(0),
 469   _ser_kac_preclean_ovflw(0),
 470   _ser_pmc_remark_ovflw(0),
 471   _par_pmc_remark_ovflw(0),
 472   _ser_kac_ovflw(0),
 473   _par_kac_ovflw(0),
 474 #ifndef PRODUCT
 475   _num_par_pushes(0),
 476 #endif
 477   _collection_count_start(0),
 478   _verifying(false),
 479   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 480   _completed_initialization(false),
 481   _collector_policy(cp),
 482   _should_unload_classes(CMSClassUnloadingEnabled),
 483   _concurrent_cycles_since_last_unload(0),
 484   _roots_scanning_options(GenCollectedHeap::SO_None),
 485   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 486   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 487   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
 488   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 489   _cms_start_registered(false)
 490 {
 491   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 492     ExplicitGCInvokesConcurrent = true;
 493   }
 494   // Now expand the span and allocate the collection support structures
 495   // (MUT, marking bit map etc.) to cover both generations subject to
 496   // collection.
 497 
 498   // For use by dirty card to oop closures.
 499   _cmsGen->cmsSpace()->set_collector(this);
 500 
 501   // Allocate MUT and marking bit map
 502   {


2562 // unload classes if it's the case that:
2563 // (1) an explicit gc request has been made and the flag
2564 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
2565 // (2) (a) class unloading is enabled at the command line, and
 2566 //     (b) old gen is getting really full, or enough concurrent cycles have run since classes were last unloaded
2567 // NOTE: Provided there is no change in the state of the heap between
2568 // calls to this method, it should have idempotent results. Moreover,
2569 // its results should be monotonically increasing (i.e. going from 0 to 1,
2570 // but not 1 to 0) between successive calls between which the heap was
 2571 // not collected. For the implementation below, it must thus rely on
 2572 // the property that concurrent_cycles_since_last_unload()
 2573 // will not decrease unless a collection cycle happened, and on the
 2574 // property that _cmsGen->is_too_full() is
 2575 // itself also monotonic in that sense. See check_monotonicity()
 2576 // below.
2577 void CMSCollector::update_should_unload_classes() {
2578   _should_unload_classes = false;
2579   // Condition 1 above
2580   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2581     _should_unload_classes = true;
2582   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
 2583     // Disjuncts of condition 2.b above
2584     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2585                               CMSClassUnloadingMaxInterval)
2586                            || _cmsGen->is_too_full();
2587   }
2588 }
2589 
2590 bool ConcurrentMarkSweepGeneration::is_too_full() const {
2591   bool res = should_concurrent_collect();
2592   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2593   return res;
2594 }
2595 
2596 void CMSCollector::setup_cms_unloading_and_verification_state() {
2597   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2598                              || VerifyBeforeExit;
2599   const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
2600 
2601   // We set the proper root for this CMS cycle here.
2602   if (should_unload_classes()) {   // Should unload classes this cycle




 462   _eden_chunk_capacity(0),     // -- ditto --
 463   _eden_chunk_index(0),        // -- ditto --
 464   _survivor_plab_array(NULL),  // -- ditto --
 465   _survivor_chunk_array(NULL), // -- ditto --
 466   _survivor_chunk_capacity(0), // -- ditto --
 467   _survivor_chunk_index(0),    // -- ditto --
 468   _ser_pmc_preclean_ovflw(0),
 469   _ser_kac_preclean_ovflw(0),
 470   _ser_pmc_remark_ovflw(0),
 471   _par_pmc_remark_ovflw(0),
 472   _ser_kac_ovflw(0),
 473   _par_kac_ovflw(0),
 474 #ifndef PRODUCT
 475   _num_par_pushes(0),
 476 #endif
 477   _collection_count_start(0),
 478   _verifying(false),
 479   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 480   _completed_initialization(false),
 481   _collector_policy(cp),
 482   _should_unload_classes(ClassUnloadingWithConcurrentMark),
 483   _concurrent_cycles_since_last_unload(0),
 484   _roots_scanning_options(GenCollectedHeap::SO_None),
 485   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 486   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 487   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
 488   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 489   _cms_start_registered(false)
 490 {
 491   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 492     ExplicitGCInvokesConcurrent = true;
 493   }
 494   // Now expand the span and allocate the collection support structures
 495   // (MUT, marking bit map etc.) to cover both generations subject to
 496   // collection.
 497 
 498   // For use by dirty card to oop closures.
 499   _cmsGen->cmsSpace()->set_collector(this);
 500 
 501   // Allocate MUT and marking bit map
 502   {


2562 // unload classes if it's the case that:
2563 // (1) an explicit gc request has been made and the flag
2564 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
2565 // (2) (a) class unloading is enabled at the command line, and
 2566 //     (b) old gen is getting really full, or enough concurrent cycles have run since classes were last unloaded
2567 // NOTE: Provided there is no change in the state of the heap between
2568 // calls to this method, it should have idempotent results. Moreover,
2569 // its results should be monotonically increasing (i.e. going from 0 to 1,
2570 // but not 1 to 0) between successive calls between which the heap was
 2571 // not collected. For the implementation below, it must thus rely on
 2572 // the property that concurrent_cycles_since_last_unload()
 2573 // will not decrease unless a collection cycle happened, and on the
 2574 // property that _cmsGen->is_too_full() is
 2575 // itself also monotonic in that sense. See check_monotonicity()
 2576 // below.
2577 void CMSCollector::update_should_unload_classes() {
2578   _should_unload_classes = false;
2579   // Condition 1 above
2580   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2581     _should_unload_classes = true;
2582   } else if (ClassUnloadingWithConcurrentMark) { // Condition 2.a above
 2583     // Disjuncts of condition 2.b above
2584     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2585                               CMSClassUnloadingMaxInterval)
2586                            || _cmsGen->is_too_full();
2587   }
2588 }
2589 
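
A note on the monotonicity claim in the comment above: as long as neither
input can go from true back to false while no collection cycle completes,
the computed answer can only move from false to true. A minimal,
self-contained sketch of the same decision structure (not HotSpot code;
every name below is a hypothetical stand-in for the collector state):

#include <cstdio>

// Hypothetical stand-ins for the state read by update_should_unload_classes().
struct MockState {
  bool     full_gc_requested;   // condition (1): an explicit GC was requested
  bool     explicit_gc_unloads; // ExplicitGCInvokesConcurrentAndUnloadsClasses
  bool     unloading_enabled;   // condition (2)(a): enabled on the command line
  unsigned cycles_since_unload; // reset only by a cycle that unloads classes
  unsigned max_interval;        // stand-in for CMSClassUnloadingMaxInterval
  bool     too_full;            // condition (2)(b): flips false->true, never
};                              // back, while the heap is not collected

// Mirrors the decision structure of update_should_unload_classes() above.
static bool should_unload(const MockState& s) {
  if (s.full_gc_requested && s.explicit_gc_unloads) return true;   // condition (1)
  if (s.unloading_enabled) {                                       // condition (2)(a)
    return s.cycles_since_unload >= s.max_interval || s.too_full;  // condition (2)(b)
  }
  return false;
}

int main() {
  MockState s = {false, true, true, 0, 5, false};
  printf("%d\n", should_unload(s)); // 0: no condition holds yet
  s.too_full = true;                // old gen fills up; still no collection
  printf("%d\n", should_unload(s)); // 1: the answer moved 0 -> 1, never back
  return 0;
}
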
2590 bool ConcurrentMarkSweepGeneration::is_too_full() const {
2591   bool res = should_concurrent_collect();
2592   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2593   return res;
2594 }
2595 
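
The threshold in is_too_full() gates the occupancy check on
should_concurrent_collect(), so a nearly full old gen alone is not enough.
A small sketch of that arithmetic, with all names and the 98% threshold
chosen purely for illustration:

#include <cstdio>

// Hypothetical stand-in for the test in is_too_full() above;
// occupancy is modeled as a plain fraction in [0.0, 1.0].
static bool too_full(bool should_concurrent_collect, double occupancy,
                     unsigned percentage) {
  return should_concurrent_collect &&
         occupancy > (double)percentage / 100.0;
}

int main() {
  printf("%d\n", too_full(true,  0.97, 98)); // 0: below the 0.98 threshold
  printf("%d\n", too_full(true,  0.99, 98)); // 1: above it, and a collection is due
  printf("%d\n", too_full(false, 0.99, 98)); // 0: gated on the first check
  return 0;
}
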
2596 void CMSCollector::setup_cms_unloading_and_verification_state() {
2597   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2598                              || VerifyBeforeExit;
2599   const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
2600 
2601   // We set the proper root for this CMS cycle here.
2602   if (should_unload_classes()) {   // Should unload classes this cycle

