src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp


--- Old version ---

1953   ref_processor()->setup_policy(clear_all_soft_refs);
1954   // If an asynchronous collection finishes, the _modUnionTable is
1955   // all clear.  If we are taking over (assuming) a collection begun
1956   // asynchronously, clear the _modUnionTable.
1957   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1958     "_modUnionTable should be clear if the baton was not passed");
1959   _modUnionTable.clear_all();
1960 
1961   // We must adjust the allocation statistics being maintained
1962   // in the free list space. We do so by reading and clearing
1963   // the sweep timer and updating the block flux rate estimates below.
1964   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1965   if (_inter_sweep_timer.is_active()) {
1966     _inter_sweep_timer.stop();
1967     // Note that we do not use this sample to update the _inter_sweep_estimate.
1968     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1969                                             _inter_sweep_estimate.padded_average(),
1970                                             _intra_sweep_estimate.padded_average());
1971   }
1972 



1973   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
1974     ref_processor(), clear_all_soft_refs);
1975   #ifdef ASSERT
1976     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1977     size_t free_size = cms_space->free();
1978     assert(free_size ==
1979            pointer_delta(cms_space->end(), cms_space->compaction_top())
1980            * HeapWordSize,
1981       "All the free space should be compacted into one chunk at top");
1982     assert(cms_space->dictionary()->totalChunkSize(
1983                                       debug_only(cms_space->freelistLock())) == 0 ||
1984            cms_space->totalSizeInIndexedFreeLists() == 0,
1985       "All the free space should be in a single chunk");
1986     size_t num = cms_space->totalCount();
1987     assert((free_size == 0 && num == 0) ||
1988            (free_size > 0  && (num == 1 || num == 2)),
1989          "There should be at most 2 free chunks after compaction");
1990   #endif // ASSERT
1991   _collectorState = Resetting;
1992   assert(_restart_addr == NULL,


3403                  _collector->cmsGen()->short_name(),
3404                  _phase, _collector->timerValue(), _wallclock.seconds());
3405     if (_print_cr) {
3406       gclog_or_tty->print_cr("");
3407     }
3408     if (PrintCMSStatistics != 0) {
3409       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3410                     _collector->yields());
3411     }
3412   }
3413 }
3414 
3415 // CMS work
3416 
3417 // Checkpoint the roots into this generation from outside
3418 // this generation. [Note this initial checkpoint need only
3419 // be approximate -- we'll do a catch up phase subsequently.]
3420 void CMSCollector::checkpointRootsInitial(bool asynch) {
3421   assert(_collectorState == InitialMarking, "Wrong collector state");
3422   check_correct_thread_executing();

3423   ReferenceProcessor* rp = ref_processor();
3424   SpecializationStats::clear();
3425   assert(_restart_addr == NULL, "Control point invariant");
3426   if (asynch) {
3427     // acquire locks for subsequent manipulations
3428     MutexLockerEx x(bitMapLock(),
3429                     Mutex::_no_safepoint_check_flag);
3430     checkpointRootsInitialWork(asynch);
3431     rp->verify_no_references_recorded();
3432     rp->enable_discovery(); // enable ("weak") refs discovery
3433     _collectorState = Marking;
3434   } else {
3435     // (Weak) Refs discovery: this is controlled from GenCollectedHeap::do_collection,
3436     // which recognizes that we are a CMS generation and doesn't try to turn on
3437     // discovery; verify that it isn't meddling.
3438     assert(!rp->discovery_is_atomic(),
3439            "incorrect setting of discovery predicate");
3440     assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3441            "ref discovery for this generation kind");
3442     // already have locks


4736           // Compute the next address at which preclean should pick up.
4737           lastAddr = next_card_start_after_block(stop_point);
4738         }
4739       }
4740     } else {
4741       break;
4742     }
4743   }
4744   verify_work_stacks_empty();
4745   verify_overflow_empty();
4746   return cumNumDirtyCards;
4747 }
4748 
4749 void CMSCollector::checkpointRootsFinal(bool asynch,
4750   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4751   assert(_collectorState == FinalMarking, "incorrect state transition?");
4752   check_correct_thread_executing();
4753   // world is stopped at this checkpoint
4754   assert(SafepointSynchronize::is_at_safepoint(),
4755          "world should be stopped");

4756   verify_work_stacks_empty();
4757   verify_overflow_empty();
4758 
4759   SpecializationStats::clear();
4760   if (PrintGCDetails) {
4761     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4762                         _young_gen->used() / K,
4763                         _young_gen->capacity() / K);
4764   }
4765   if (asynch) {
4766     if (CMSScavengeBeforeRemark) {
4767       GenCollectedHeap* gch = GenCollectedHeap::heap();
4768       // Temporarily set the flag to false; GCH->do_collection
4769       // expects it to be false and will set it to true.
4770       FlagSetting fl(gch->_is_gc_active, false);
4771       NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
4772         PrintGCDetails && Verbose, true, gclog_or_tty);)
4773       int level = _cmsGen->level() - 1;
4774       if (level >= 0) {
4775         gch->do_collection(true,        // full (i.e. force, see below)


5837     // phase of CMS collection.
5838     if (t->is_ConcurrentGC_thread()) {
5839       assert(_collectorState == InitialMarking ||
5840              _collectorState == FinalMarking,
5841              "Should be a stop-world phase");
5842       // The CMS thread should be holding the CMS_token.
5843       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5844              "Potential interference with concurrently "
5845              "executing VM thread");
5846     }
5847   }
5848 }
5849 #endif
5850 
5851 void CMSCollector::sweep(bool asynch) {
5852   assert(_collectorState == Sweeping, "just checking");
5853   check_correct_thread_executing();
5854   verify_work_stacks_empty();
5855   verify_overflow_empty();
5856   increment_sweep_count();


5857   _inter_sweep_timer.stop();
5858   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5859   size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5860 
5861   // PermGen verification support: If perm gen sweeping is disabled in
5862   // this cycle, we preserve the perm gen object "deadness" information
5863   // in the perm_gen_verify_bit_map. In order to do that we traverse
5864   // all blocks in perm gen and mark all dead objects.
5865   if (verifying() && !should_unload_classes()) {
5866     assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5867            "Should have already been allocated");
5868     MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5869                                markBitMap(), perm_gen_verify_bit_map());
5870     if (asynch) {
5871       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5872                                bitMapLock());
5873       _permGen->cmsSpace()->blk_iterate(&mdo);
5874     } else {
5875       // In the case of synchronous sweep, we already have
5876       // the requisite locks/tokens.


9109   assert(_collector->overflow_list_is_empty() || res,
9110          "If list is not empty, we should have taken something");
9111   assert(!res || !_mark_stack->isEmpty(),
9112          "If we took something, it should now be on our stack");
9113   return res;
9114 }
9115 
9116 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9117   size_t res = _sp->block_size_no_stall(addr, _collector);
9118   assert(res != 0, "Should always be able to compute a size");
9119   if (_sp->block_is_obj(addr)) {
9120     if (_live_bit_map->isMarked(addr)) {
9121       // It can't have been dead in a previous cycle
9122       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9123     } else {
9124       _dead_bit_map->mark(addr);      // mark the dead object
9125     }
9126   }
9127   return res;
9128 }
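
The closure above is easiest to see in miniature. Below is a self-contained sketch (all names are ours, not HotSpot's) of the invariant do_blk enforces: a block marked live may never already appear in the dead map ("No resurrection!"), and an unmarked object block gets recorded as dead.

#include <cassert>
#include <cstddef>
#include <vector>

struct BitMapModel {
  std::vector<bool> bits;
  explicit BitMapModel(size_t n) : bits(n, false) {}
  bool isMarked(size_t i) const { return bits[i]; }
  void mark(size_t i)           { bits[i] = true; }
};

// Mirrors MarkDeadObjectsClosure::do_blk for one block: a live block must
// not already be in the dead map ("No resurrection!"); an unmarked object
// block is recorded as dead.
void record_deadness(size_t blk, const BitMapModel& live, BitMapModel& dead) {
  if (live.isMarked(blk)) {
    assert(!dead.isMarked(blk) && "No resurrection!");
  } else {
    dead.mark(blk);
  }
}

int main() {
  BitMapModel live(8), dead(8);
  live.mark(2);                                  // block 2 survived marking
  for (size_t b = 0; b < 8; ++b) {
    record_deadness(b, live, dead);
  }
  assert(!dead.isMarked(2) && dead.isMarked(3)); // deadness info preserved
  return 0;
}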


--- New version ---

1953   ref_processor()->setup_policy(clear_all_soft_refs);
1954   // If an asynchronous collection finishes, the _modUnionTable is
1955   // all clear.  If we are taking over (assuming) a collection begun
1956   // asynchronously, clear the _modUnionTable.
1957   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1958     "_modUnionTable should be clear if the baton was not passed");
1959   _modUnionTable.clear_all();
1960 
1961   // We must adjust the allocation statistics being maintained
1962   // in the free list space. We do so by reading and clearing
1963   // the sweep timer and updating the block flux rate estimates below.
1964   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1965   if (_inter_sweep_timer.is_active()) {
1966     _inter_sweep_timer.stop();
1967     // Note that we do not use this sample to update the _inter_sweep_estimate.
1968     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1969                                             _inter_sweep_estimate.padded_average(),
1970                                             _intra_sweep_estimate.padded_average());
1971   }
1972 
1973   // Must be a named local (a bare temporary "TraceCMSMemoryManagerStats();"
1974   // dies at the semicolon) so it stays in scope across the mark-sweep below.
1975   TraceCMSMemoryManagerStats tms;
1976   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
1977     ref_processor(), clear_all_soft_refs);
1978   #ifdef ASSERT
1979     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1980     size_t free_size = cms_space->free();
1981     assert(free_size ==
1982            pointer_delta(cms_space->end(), cms_space->compaction_top())
1983            * HeapWordSize,
1984       "All the free space should be compacted into one chunk at top");
1985     assert(cms_space->dictionary()->totalChunkSize(
1986                                       debug_only(cms_space->freelistLock())) == 0 ||
1987            cms_space->totalSizeInIndexedFreeLists() == 0,
1988       "All the free space should be in a single chunk");
1989     size_t num = cms_space->totalCount();
1990     assert((free_size == 0 && num == 0) ||
1991            (free_size > 0  && (num == 1 || num == 2)),
1992          "There should be at most 2 free chunks after compaction");
1993   #endif // ASSERT
1994   _collectorState = Resetting;
1995   assert(_restart_addr == NULL,
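
The free-space assertion above works because pointer_delta() measures the distance between two heap addresses in HeapWords, so it must be scaled by HeapWordSize before comparing with free(), which is in bytes. A compilable sketch of that arithmetic, using stand-in types rather than the VM's:

#include <cassert>
#include <cstddef>

// Stand-ins for HotSpot's heap-word machinery (local models, not the VM's).
struct HeapWord { void* pad; };
const size_t HeapWordSize = sizeof(HeapWord);

// Distance between two heap addresses, measured in HeapWords.
size_t pointer_delta(const HeapWord* left, const HeapWord* right) {
  return (size_t)(left - right);
}

int main() {
  HeapWord space[1024];
  HeapWord* end            = space + 1024; // end of the space
  HeapWord* compaction_top = space + 1000; // live data was slid below here
  // After sliding compaction the only free space is [compaction_top, end),
  // so free() in bytes must equal the word distance scaled by HeapWordSize.
  size_t free_size = pointer_delta(end, compaction_top) * HeapWordSize;
  assert(free_size == 24 * HeapWordSize);
  return 0;
}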


3406                  _collector->cmsGen()->short_name(),
3407                  _phase, _collector->timerValue(), _wallclock.seconds());
3408     if (_print_cr) {
3409       gclog_or_tty->print_cr("");
3410     }
3411     if (PrintCMSStatistics != 0) {
3412       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3413                     _collector->yields());
3414     }
3415   }
3416 }
3417 
3418 // CMS work
3419 
3420 // Checkpoint the roots into this generation from outside
3421 // this generation. [Note this initial checkpoint need only
3422 // be approximate -- we'll do a catch up phase subsequently.]
3423 void CMSCollector::checkpointRootsInitial(bool asynch) {
3424   assert(_collectorState == InitialMarking, "Wrong collector state");
3425   check_correct_thread_executing();
3426   TraceCMSMemoryManagerStats tms(_collectorState);
3427   ReferenceProcessor* rp = ref_processor();
3428   SpecializationStats::clear();
3429   assert(_restart_addr == NULL, "Control point invariant");
3430   if (asynch) {
3431     // acquire locks for subsequent manipulations
3432     MutexLockerEx x(bitMapLock(),
3433                     Mutex::_no_safepoint_check_flag);
3434     checkpointRootsInitialWork(asynch);
3435     rp->verify_no_references_recorded();
3436     rp->enable_discovery(); // enable ("weak") refs discovery
3437     _collectorState = Marking;
3438   } else {
3439     // (Weak) Refs discovery: this is controlled from GenCollectedHeap::do_collection,
3440     // which recognizes that we are a CMS generation and doesn't try to turn on
3441     // discovery; verify that it isn't meddling.
3442     assert(!rp->discovery_is_atomic(),
3443            "incorrect setting of discovery predicate");
3444     assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3445            "ref discovery for this generation kind");
3446     // already have locks
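
In the asynchronous branch above, bitMapLock() is held through an RAII locker, so it is released on every exit path from the scope. A minimal model of that MutexLockerEx pattern, with std::mutex standing in for a VM mutex and the no-safepoint-check flag merely echoed:

#include <mutex>

// Minimal model of the MutexLockerEx pattern: lock in the constructor,
// unlock in the destructor, so every exit path from the scope releases
// the lock. The no-safepoint-check flag is only echoed here; in the VM
// it suppresses the safepoint check while acquiring.
class MutexLockerModel {
  std::mutex& _m;
 public:
  MutexLockerModel(std::mutex& m, bool /* no_safepoint_check */) : _m(m) {
    _m.lock();
  }
  ~MutexLockerModel() { _m.unlock(); }
};

std::mutex bit_map_lock;

void checkpoint_work_model() {
  MutexLockerModel x(bit_map_lock, /* no_safepoint_check = */ true);
  // ... manipulate the mark bit map; the lock drops when x leaves scope.
}

int main() {
  checkpoint_work_model();
  return 0;
}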


4740           // Compute the next address at which preclean should pick up.
4741           lastAddr = next_card_start_after_block(stop_point);
4742         }
4743       }
4744     } else {
4745       break;
4746     }
4747   }
4748   verify_work_stacks_empty();
4749   verify_overflow_empty();
4750   return cumNumDirtyCards;
4751 }
4752 
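
The precleaning loop above resumes at the first card boundary past the block that contains stop_point. A sketch of that rounding, assuming HotSpot's default 512-byte cards (the real next_card_start_after_block first finds the end of the enclosing block; here we round a raw address):

#include <cassert>
#include <cstdint>

// 512-byte cards, as in HotSpot's card table (card_shift == 9).
const uintptr_t card_size = 512;

// First card-aligned address strictly after addr; if addr already sits on
// a boundary we still advance, since the card containing addr - 1 is done.
uintptr_t next_card_start_after(uintptr_t addr) {
  return (addr + card_size) & ~(card_size - 1);
}

int main() {
  assert(next_card_start_after(0x1000) == 0x1200); // on a boundary: advance
  assert(next_card_start_after(0x11ff) == 0x1200); // mid-card: round up
  return 0;
}
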
4753 void CMSCollector::checkpointRootsFinal(bool asynch,
4754   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4755   assert(_collectorState == FinalMarking, "incorrect state transition?");
4756   check_correct_thread_executing();
4757   // world is stopped at this checkpoint
4758   assert(SafepointSynchronize::is_at_safepoint(),
4759          "world should be stopped");
4760   TraceCMSMemoryManagerStats tms(_collectorState);
4761   verify_work_stacks_empty();
4762   verify_overflow_empty();
4763 
4764   SpecializationStats::clear();
4765   if (PrintGCDetails) {
4766     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4767                         _young_gen->used() / K,
4768                         _young_gen->capacity() / K);
4769   }
4770   if (asynch) {
4771     if (CMSScavengeBeforeRemark) {
4772       GenCollectedHeap* gch = GenCollectedHeap::heap();
4773       // Temporarily set the flag to false; GCH->do_collection
4774       // expects it to be false and will set it to true.
4775       FlagSetting fl(gch->_is_gc_active, false);
4776       NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
4777         PrintGCDetails && Verbose, true, gclog_or_tty);)
4778       int level = _cmsGen->level() - 1;
4779       if (level >= 0) {
4780         gch->do_collection(true,        // full (i.e. force, see below)
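
FlagSetting above is the usual RAII save/set/restore idiom: _is_gc_active is forced to false only for the duration of the nested scavenge and is restored when fl goes out of scope. A minimal model:

#include <cassert>

// Model of the FlagSetting idiom: remember the old value, install the new
// one, restore on scope exit.
class FlagSettingModel {
  bool& _flag;
  bool  _saved;
 public:
  FlagSettingModel(bool& flag, bool new_value) : _flag(flag), _saved(flag) {
    _flag = new_value;
  }
  ~FlagSettingModel() { _flag = _saved; }
};

bool is_gc_active = true;

int main() {
  {
    FlagSettingModel fl(is_gc_active, false);
    assert(!is_gc_active);  // do_collection sees false and may set it true
  }
  assert(is_gc_active);     // restored automatically at scope exit
  return 0;
}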


5842     // phase of CMS collection.
5843     if (t->is_ConcurrentGC_thread()) {
5844       assert(_collectorState == InitialMarking ||
5845              _collectorState == FinalMarking,
5846              "Should be a stop-world phase");
5847       // The CMS thread should be holding the CMS_token.
5848       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5849              "Potential interference with concurrently "
5850              "executing VM thread");
5851     }
5852   }
5853 }
5854 #endif
5855 
5856 void CMSCollector::sweep(bool asynch) {
5857   assert(_collectorState == Sweeping, "just checking");
5858   check_correct_thread_executing();
5859   verify_work_stacks_empty();
5860   verify_overflow_empty();
5861   increment_sweep_count();
5862   TraceCMSMemoryManagerStats tms(_collectorState);
5863 
5864   _inter_sweep_timer.stop();
5865   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5866   size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5867 
5868   // PermGen verification support: If perm gen sweeping is disabled in
5869   // this cycle, we preserve the perm gen object "deadness" information
5870   // in the perm_gen_verify_bit_map. In order to do that we traverse
5871   // all blocks in perm gen and mark all dead objects.
5872   if (verifying() && !should_unload_classes()) {
5873     assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
5874            "Should have already been allocated");
5875     MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
5876                                markBitMap(), perm_gen_verify_bit_map());
5877     if (asynch) {
5878       CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
5879                                bitMapLock());
5880       _permGen->cmsSpace()->blk_iterate(&mdo);
5881     } else {
5882       // In the case of synchronous sweep, we already have
5883       // the requisite locks/tokens.
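
The sweep entry above feeds _inter_sweep_timer samples into an estimator whose padded_average() later drives the free-list census (see beginSweepFLCensus earlier in this file). A sketch in the spirit of HotSpot's AdaptivePaddedAverage — a decaying mean padded with a decaying deviation so the estimate errs high; the weight and padding values below are illustrative, not the VM's defaults:

#include <cmath>
#include <cstdio>
#include <initializer_list>

class PaddedAverageModel {
  double _avg, _dev, _weight, _padding;
 public:
  PaddedAverageModel(double weight, double padding)
    : _avg(0.0), _dev(0.0), _weight(weight), _padding(padding) {}
  void sample(double v) {
    _avg = _weight * v + (1.0 - _weight) * _avg;                 // decaying mean
    _dev = _weight * std::fabs(v - _avg) + (1.0 - _weight) * _dev;
  }
  double average() const        { return _avg; }
  // Err on the high side, as padded_average() does in the census calls.
  double padded_average() const { return _avg + _padding * _dev; }
};

int main() {
  PaddedAverageModel inter_sweep(0.25, 2.0);
  for (double s : {1.8, 2.2, 2.0, 2.6}) {
    inter_sweep.sample(s);  // one sample per completed inter-sweep interval
  }
  std::printf("avg=%.3f padded=%.3f\n",
              inter_sweep.average(), inter_sweep.padded_average());
  return 0;
}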


9116   assert(_collector->overflow_list_is_empty() || res,
9117          "If list is not empty, we should have taken something");
9118   assert(!res || !_mark_stack->isEmpty(),
9119          "If we took something, it should now be on our stack");
9120   return res;
9121 }
9122 
9123 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9124   size_t res = _sp->block_size_no_stall(addr, _collector);
9125   assert(res != 0, "Should always be able to compute a size");
9126   if (_sp->block_is_obj(addr)) {
9127     if (_live_bit_map->isMarked(addr)) {
9128       // It can't have been dead in a previous cycle
9129       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9130     } else {
9131       _dead_bit_map->mark(addr);      // mark the dead object
9132     }
9133   }
9134   return res;
9135 }
9136 
9137 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase): TraceMemoryManagerStats() {
9138 
9139   switch (phase) {
9140     case CMSCollector::InitialMarking:
9141       initialize(true  /* fullGC */ ,
9142                  true  /* recordGCBeginTime */,
9143                  true  /* recordPreGCUsage */,
9144                  false /* recordPeakUsage */,
9145                  false /* recordPostGCUsage */,
9146                  true  /* recordAccumulatedGCTime */,
9147                  false /* recordGCEndTime */,
9148                  false /* countCollection */  );
9149       break;
9150 
9151     case CMSCollector::FinalMarking:
9152       initialize(true  /* fullGC */ ,
9153                  false /* recordGCBeginTime */,
9154                  false /* recordPreGCUsage */,
9155                  false /* recordPeakUsage */,
9156                  false /* recordPostGCUsage */,
9157                  true  /* recordAccumulatedGCTime */,
9158                  false /* recordGCEndTime */,
9159                  false /* countCollection */  );
9160       break;
9161 
9162     case CMSCollector::Sweeping:
9163       initialize(true  /* fullGC */ ,
9164                  false /* recordGCBeginTime */,
9165                  false /* recordPreGCUsage */,
9166                  true  /* recordPeakUsage */,
9167                  true  /* recordPostGCUsage */,
9168                  false /* recordAccumulatedGCTime */,
9169                  true  /* recordGCEndTime */,
9170                  true  /* countCollection */  );
9171       break;
9172 
9173     default:
9174       ShouldNotReachHere();
9175   }
9176 }
9177 
9178 // Used when bailing out of CMS on a concurrent mode failure.
9179 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(): TraceMemoryManagerStats() {
9180   initialize(true /* fullGC */ ,
9181              true /* recordGCBeginTime */,
9182              true /* recordPreGCUsage */,
9183              true /* recordPeakUsage */,
9184              true /* recordPostGCUsage */,
9185              true /* recordAccumulatedGCTime */,
9186              true /* recordGCEndTime */,
9187              true /* countCollection */ );
9188 }
9189
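
The phase-keyed constructor stitches one logical collection out of several pauses: InitialMarking opens it (GC begin time, pre-GC usage), FinalMarking only accumulates pause time, and Sweeping closes and counts it. A runnable miniature of that flag scheme (names are ours; the real class also snapshots memory pool usage through TraceMemoryManagerStats), which also shows why the tracer must be a named local:

#include <cstdio>

enum Phase { InitialMarking, FinalMarking, Sweeping };

class PhaseTracerModel {
  bool _record_end, _count;
 public:
  explicit PhaseTracerModel(Phase p)
    : _record_end(p == Sweeping), _count(p == Sweeping) {
    // Mirror the switch above: only the first pause of the CMS cycle
    // opens the collection; the sweep closes and counts it.
    if (p == InitialMarking) std::printf("GC begin + pre-GC usage recorded\n");
  }
  ~PhaseTracerModel() {
    if (_record_end) std::printf("GC end + post-GC usage recorded\n");
    if (_count)      std::printf("collection counted\n");
  }
};

int main() {
  // Each pause gets a *named* local; an unnamed temporary such as
  // "PhaseTracerModel{Sweeping};" would be destroyed at the semicolon,
  // closing the phase before any work had happened.
  { PhaseTracerModel t(InitialMarking); /* initial mark work */ }
  { PhaseTracerModel t(FinalMarking);   /* remark work */ }
  { PhaseTracerModel t(Sweeping);       /* sweep work */ }
  return 0;
}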