< prev index next >

src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

Print this page




  36 #include "gc/cms/vmCMSOperations.hpp"
  37 #include "gc/serial/genMarkSweep.hpp"
  38 #include "gc/serial/tenuredGeneration.hpp"
  39 #include "gc/shared/adaptiveSizePolicy.hpp"
  40 #include "gc/shared/cardGeneration.inline.hpp"
  41 #include "gc/shared/cardTableRS.hpp"
  42 #include "gc/shared/collectedHeap.inline.hpp"
  43 #include "gc/shared/collectorCounters.hpp"
  44 #include "gc/shared/collectorPolicy.hpp"
  45 #include "gc/shared/gcLocker.inline.hpp"
  46 #include "gc/shared/gcPolicyCounters.hpp"
  47 #include "gc/shared/gcTimer.hpp"
  48 #include "gc/shared/gcTrace.hpp"
  49 #include "gc/shared/gcTraceTime.hpp"
  50 #include "gc/shared/genCollectedHeap.hpp"
  51 #include "gc/shared/genOopClosures.inline.hpp"
  52 #include "gc/shared/isGCActiveMark.hpp"
  53 #include "gc/shared/referencePolicy.hpp"
  54 #include "gc/shared/strongRootsScope.hpp"
  55 #include "gc/shared/taskqueue.inline.hpp"

  56 #include "memory/allocation.hpp"
  57 #include "memory/iterator.inline.hpp"
  58 #include "memory/padded.hpp"
  59 #include "memory/resourceArea.hpp"
  60 #include "oops/oop.inline.hpp"
  61 #include "prims/jvmtiExport.hpp"
  62 #include "runtime/atomic.inline.hpp"
  63 #include "runtime/globals_extension.hpp"
  64 #include "runtime/handles.inline.hpp"
  65 #include "runtime/java.hpp"
  66 #include "runtime/orderAccess.inline.hpp"
  67 #include "runtime/vmThread.hpp"
  68 #include "services/memoryService.hpp"
  69 #include "services/runtimeService.hpp"
  70 #include "utilities/stack.inline.hpp"
  71 
// statics
// Shared collector instance used by ConcurrentMarkSweepGeneration;
// presumably created once during heap initialization elsewhere in this file — confirm.
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
// Set when an (asynchronous) full GC has been requested; cleared once serviced.
bool CMSCollector::_full_gc_requested = false;
// Cause recorded alongside _full_gc_requested; _no_gc when no request is pending.
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;


1563 // after obtaining the free list locks for the
1564 // two generations.
// Recompute desired sizes for metaspace and the CMS generation's free lists.
// Caller must hold the Heap_lock or be at a safepoint; the FreelistLocker
// acquires the free list locks for the duration of the resizing.
void CMSCollector::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);
  FreelistLocker z(this);                  // hold free list locks while resizing
  MetaspaceGC::compute_new_size();         // metaspace high-water-mark update
  _cmsGen->compute_new_size_free_list();   // CMS old gen free-list-based resize
}
1571 
1572 // A work method used by the foreground collector to do
1573 // a mark-sweep-compact.
1574 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1575   GenCollectedHeap* gch = GenCollectedHeap::heap();
1576 
1577   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1578   gc_timer->register_gc_start();
1579 
1580   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1581   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1582 
1583   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
1584 
1585   // Temporarily widen the span of the weak reference processing to
1586   // the entire heap.
1587   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1588   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1589   // Temporarily, clear the "is_alive_non_header" field of the
1590   // reference processor.
1591   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1592   // Temporarily make reference _processing_ single threaded (non-MT).
1593   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1594   // Temporarily make refs discovery atomic
1595   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1596   // Temporarily make reference _discovery_ single threaded (non-MT)
1597   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1598 
1599   ref_processor()->set_enqueuing_is_done(false);
1600   ref_processor()->enable_discovery();
1601   ref_processor()->setup_policy(clear_all_soft_refs);
1602   // If an asynchronous collection finishes, the _modUnionTable is
1603   // all clear.  If we are assuming the collection from an asynchronous


1946   // Should this be in gc_epilogue?
1947   collector_policy()->counters()->update_counters();
1948 
1949   {
1950     // Clear _foregroundGCShouldWait and, in the event that the
1951     // foreground collector is waiting, notify it, before
1952     // returning.
1953     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1954     _foregroundGCShouldWait = false;
1955     if (_foregroundGCIsActive) {
1956       CGC_lock->notify();
1957     }
1958     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1959            "Possible deadlock");
1960   }
1961   if (TraceCMSState) {
1962     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1963       " exiting collection CMS state %d",
1964       p2i(Thread::current()), _collectorState);
1965   }
1966   if (PrintGC && Verbose) {
1967     _cmsGen->print_heap_change(prev_used);
1968   }
1969 }
1970 
// Record the start of a CMS collection for timing/tracing purposes.
// Sets _cms_start_registered so that register_gc_end() knows a matching
// start event was reported.
void CMSCollector::register_gc_start(GCCause::Cause cause) {
  _cms_start_registered = true;
  _gc_timer_cm->register_gc_start();
  // Report the start to the tracer using the timestamp just recorded.
  _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
}
1976 
1977 void CMSCollector::register_gc_end() {
1978   if (_cms_start_registered) {
1979     report_heap_summary(GCWhen::AfterGC);
1980 
1981     _gc_timer_cm->register_gc_end();
1982     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1983     _cms_start_registered = false;
1984   }
1985 }
1986 
1987 void CMSCollector::save_heap_summary() {
1988   GenCollectedHeap* gch = GenCollectedHeap::heap();


2302 class VerifyMarkedClosure: public BitMapClosure {
2303   CMSBitMap* _marks;
2304   bool       _failed;
2305 
2306  public:
2307   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2308 
2309   bool do_bit(size_t offset) {
2310     HeapWord* addr = _marks->offsetToHeapWord(offset);
2311     if (!_marks->isMarked(addr)) {
2312       oop(addr)->print_on(gclog_or_tty);
2313       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2314       _failed = true;
2315     }
2316     return true;
2317   }
2318 
2319   bool failed() { return _failed; }
2320 };
2321 
2322 bool CMSCollector::verify_after_remark(bool silent) {
2323   if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2324   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2325   static bool init = false;
2326 
2327   assert(SafepointSynchronize::is_at_safepoint(),
2328          "Else mutations in object graph will make answer suspect");
2329   assert(have_cms_token(),
2330          "Else there may be mutual interference in use of "
2331          " verification data structures");
2332   assert(_collectorState > Marking && _collectorState <= Sweeping,
2333          "Else marking info checked here may be obsolete");
2334   assert(haveFreelistLocks(), "must hold free list locks");
2335   assert_lock_strong(bitMapLock());
2336 
2337 
2338   // Allocate marking bit map if not already allocated
2339   if (!init) { // first time
2340     if (!verification_mark_bm()->allocate(_span)) {
2341       return false;
2342     }
2343     init = true;


2366   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2367   // Update the saved marks which may affect the root scans.
2368   gch->save_marks();
2369 
2370   if (CMSRemarkVerifyVariant == 1) {
2371     // In this first variant of verification, we complete
2372     // all marking, then check if the new marks-vector is
2373     // a subset of the CMS marks-vector.
2374     verify_after_remark_work_1();
2375   } else if (CMSRemarkVerifyVariant == 2) {
2376     // In this second variant of verification, we flag an error
2377     // (i.e. an object reachable in the new marks-vector not reachable
2378     // in the CMS marks-vector) immediately, also indicating the
2379     // identify of an object (A) that references the unmarked object (B) --
2380     // presumably, a mutation to A failed to be picked up by preclean/remark?
2381     verify_after_remark_work_2();
2382   } else {
2383     warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant",
2384             CMSRemarkVerifyVariant);
2385   }
2386   if (!silent) gclog_or_tty->print(" done] ");
2387   return true;
2388 }
2389 
2390 void CMSCollector::verify_after_remark_work_1() {
2391   ResourceMark rm;
2392   HandleMark  hm;
2393   GenCollectedHeap* gch = GenCollectedHeap::heap();
2394 
2395   // Get a clear set of claim bits for the roots processing to work with.
2396   ClassLoaderDataGraph::clear_claimed_marks();
2397 
2398   // Mark from roots one level into CMS
2399   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2400   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2401 
2402   {
2403     StrongRootsScope srs(1);
2404 
2405     gch->gen_process_roots(&srs,
2406                            GenCollectedHeap::OldGen,


2825  public:
2826   // Not MT-safe; so do not pass around these StackObj's
2827   // where they may be accessed by other threads.
  // Return the elapsed wall-clock time in milliseconds so far.
  // The timer is briefly stopped to snapshot its value, then restarted,
  // so callers observe it as continuously running.
  jlong wallclock_millis() {
    assert(_wallclock.is_active(), "Wall clock should not stop");
    _wallclock.stop();  // to record time
    jlong ret = _wallclock.milliseconds();
    _wallclock.start(); // restart
    return ret;
  }
2835 };
2836 
// Begin accounting for a named concurrent CMS phase: optionally log the
// phase-start banner, reset the collector's yield count and CPU timer,
// and start both the wall clock and the collector timer.
// The matching teardown/reporting happens in the destructor.
CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
                                       const char *phase,
                                       bool print_cr) :
  _collector(collector), _phase(phase), _print_cr(print_cr) {

  if (PrintCMSStatistics != 0) {
    _collector->resetYields();  // yield count reported per phase
  }
  if (PrintGCDetails) {
    gclog_or_tty->gclog_stamp();
    gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
      _collector->cmsGen()->short_name(), _phase);
  }
  // Start timers last so the banner printing is not charged to the phase.
  _collector->resetTimer();
  _wallclock.start();
  _collector->startTimer();
}
2854 
// End accounting for the phase: stop both timers and, when logging is
// enabled, print the phase summary (collector CPU time / wall-clock time)
// plus an optional yield count.
CMSPhaseAccounting::~CMSPhaseAccounting() {
  assert(_wallclock.is_active(), "Wall clock should not have stopped");
  _collector->stopTimer();
  _wallclock.stop();
  if (PrintGCDetails) {
    gclog_or_tty->gclog_stamp();
    gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
                 _collector->cmsGen()->short_name(),
                 _phase, _collector->timerValue(), _wallclock.seconds());
    // _print_cr lets a caller keep subsequent output on the same line.
    if (_print_cr) {
      gclog_or_tty->cr();
    }
    if (PrintCMSStatistics != 0) {
      gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
                    _collector->yields());
    }
  }
}
2873 
2874 // CMS work
2875 
2876 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
// Base class holding the state and young-gen scanning helpers shared by
// the parallel initial-mark and remark tasks (CMSParInitialMarkTask and
// CMSParRemarkTask). Not instantiated directly: the constructor is protected.
class CMSParMarkTask : public AbstractGangTask {
 protected:
  CMSCollector*     _collector;  // owning collector
  uint              _n_workers;  // number of parallel workers for this task
  CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
      AbstractGangTask(name),
      _collector(collector),
      _n_workers(n_workers) {}
  // Work method in support of parallel rescan ... of young gen spaces
  void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
                             ContiguousSpace* space,
                             HeapWord** chunk_array, size_t chunk_top);
  void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
};
2891 


2918                     Mutex::_no_safepoint_check_flag);
2919     checkpointRootsInitialWork();
2920     // enable ("weak") refs discovery
2921     rp->enable_discovery();
2922     _collectorState = Marking;
2923   }
2924 }
2925 
2926 void CMSCollector::checkpointRootsInitialWork() {
2927   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2928   assert(_collectorState == InitialMarking, "just checking");
2929 
2930   // Already have locks.
2931   assert_lock_strong(bitMapLock());
2932   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2933 
2934   // Setup the verification and class unloading state for this
2935   // CMS collection cycle.
2936   setup_cms_unloading_and_verification_state();
2937 
2938   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
2939     PrintGCDetails && Verbose, true, _gc_timer_cm);)
2940 
2941   // Reset all the PLAB chunk arrays if necessary.
2942   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2943     reset_survivor_plab_arrays();
2944   }
2945 
2946   ResourceMark rm;
2947   HandleMark  hm;
2948 
2949   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2950   GenCollectedHeap* gch = GenCollectedHeap::heap();
2951 
2952   verify_work_stacks_empty();
2953   verify_overflow_empty();
2954 
2955   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2956   // Update the saved marks which may affect the root scans.
2957   gch->save_marks();
2958 
2959   // weak reference processing has not started yet.


3023   save_sweep_limits();
3024   verify_overflow_empty();
3025 }
3026 
3027 bool CMSCollector::markFromRoots() {
3028   // we might be tempted to assert that:
3029   // assert(!SafepointSynchronize::is_at_safepoint(),
3030   //        "inconsistent argument?");
3031   // However that wouldn't be right, because it's possible that
3032   // a safepoint is indeed in progress as a young generation
3033   // stop-the-world GC happens even as we mark in this generation.
3034   assert(_collectorState == Marking, "inconsistent state?");
3035   check_correct_thread_executing();
3036   verify_overflow_empty();
3037 
3038   // Weak ref discovery note: We may be discovering weak
3039   // refs in this generation concurrent (but interleaved) with
3040   // weak ref discovery by the young generation collector.
3041 
3042   CMSTokenSyncWithLocks ts(true, bitMapLock());
3043   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3044   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3045   bool res = markFromRootsWork();
3046   if (res) {
3047     _collectorState = Precleaning;
3048   } else { // We failed and a foreground collection wants to take over
3049     assert(_foregroundGCIsActive, "internal state inconsistency");
3050     assert(_restart_addr == NULL,  "foreground will restart from scratch");
3051     if (PrintGCDetails) {
3052       gclog_or_tty->print_cr("bailing out to foreground collection");
3053     }
3054   }
3055   verify_overflow_empty();
3056   return res;
3057 }
3058 
3059 bool CMSCollector::markFromRootsWork() {
3060   // iterate over marked bits in bit map, doing a full scan and mark
3061   // from these roots using the following algorithm:
3062   // . if oop is to the right of the current scan pointer,
3063   //   mark corresponding bit (we'll process it later)


3720 void CMSCollector::preclean() {
3721   check_correct_thread_executing();
3722   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3723   verify_work_stacks_empty();
3724   verify_overflow_empty();
3725   _abort_preclean = false;
3726   if (CMSPrecleaningEnabled) {
3727     if (!CMSEdenChunksRecordAlways) {
3728       _eden_chunk_index = 0;
3729     }
3730     size_t used = get_eden_used();
3731     size_t capacity = get_eden_capacity();
3732     // Don't start sampling unless we will get sufficiently
3733     // many samples.
3734     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
3735                 * CMSScheduleRemarkEdenPenetration)) {
3736       _start_sampling = true;
3737     } else {
3738       _start_sampling = false;
3739     }
3740     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3741     CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
3742     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3743   }
3744   CMSTokenSync x(true); // is cms thread
3745   if (CMSPrecleaningEnabled) {
3746     sample_eden();
3747     _collectorState = AbortablePreclean;
3748   } else {
3749     _collectorState = FinalMarking;
3750   }
3751   verify_work_stacks_empty();
3752   verify_overflow_empty();
3753 }
3754 
3755 // Try and schedule the remark such that young gen
3756 // occupancy is CMSScheduleRemarkEdenPenetration %.
3757 void CMSCollector::abortable_preclean() {
3758   check_correct_thread_executing();
3759   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3760   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3761 
3762   // If Eden's current occupancy is below this threshold,
3763   // immediately schedule the remark; else preclean
3764   // past the next scavenge in an effort to
3765   // schedule the pause as described above. By choosing
3766   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3767   // we will never do an actual abortable preclean cycle.
3768   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3769     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3770     CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
3771     // We need more smarts in the abortable preclean
3772     // loop below to deal with cases where allocation
3773     // in young gen is very very slow, and our precleaning
3774     // is running a losing race against a horde of
3775     // mutators intent on flooding us with CMS updates
3776     // (dirty cards).
3777     // One, admittedly dumb, strategy is to give up
3778     // after a certain number of abortable precleaning loops
3779     // or after a certain maximum time. We want to make
3780     // this smarter in the next iteration.
3781     // XXX FIX ME!!! YSR
3782     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3783     while (!(should_abort_preclean() ||
3784              ConcurrentMarkSweepThread::should_terminate())) {
3785       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3786       cumworkdone += workdone;
3787       loops++;
3788       // Voluntarily terminate abortable preclean phase if we have
3789       // been at it for too long.


4230   check_correct_thread_executing();
4231   // world is stopped at this checkpoint
4232   assert(SafepointSynchronize::is_at_safepoint(),
4233          "world should be stopped");
4234   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
4235 
4236   verify_work_stacks_empty();
4237   verify_overflow_empty();
4238 
4239   if (PrintGCDetails) {
4240     gclog_or_tty->print("[YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)]",
4241                         _young_gen->used() / K,
4242                         _young_gen->capacity() / K);
4243   }
4244   {
4245     if (CMSScavengeBeforeRemark) {
4246       GenCollectedHeap* gch = GenCollectedHeap::heap();
4247       // Temporarily set flag to false, GCH->do_collection will
4248       // expect it to be false and set to true
4249       FlagSetting fl(gch->_is_gc_active, false);
4250       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4251         PrintGCDetails && Verbose, true, _gc_timer_cm);)

4252       gch->do_collection(true,                      // full (i.e. force, see below)
4253                          false,                     // !clear_all_soft_refs
4254                          0,                         // size
4255                          false,                     // is_tlab
4256                          GenCollectedHeap::YoungGen // type
4257         );
4258     }
4259     FreelistLocker x(this);
4260     MutexLockerEx y(bitMapLock(),
4261                     Mutex::_no_safepoint_check_flag);
4262     checkpointRootsFinalWork();
4263   }
4264   verify_work_stacks_empty();
4265   verify_overflow_empty();
4266 }
4267 
4268 void CMSCollector::checkpointRootsFinalWork() {
4269   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
4270 
4271   assert(haveFreelistLocks(), "must have free list locks");
4272   assert_lock_strong(bitMapLock());
4273 
4274   ResourceMark rm;
4275   HandleMark   hm;
4276 
4277   GenCollectedHeap* gch = GenCollectedHeap::heap();
4278 
4279   if (should_unload_classes()) {
4280     CodeCache::gc_prologue();
4281   }
4282   assert(haveFreelistLocks(), "must have free list locks");
4283   assert_lock_strong(bitMapLock());
4284 
4285   // We might assume that we need not fill TLAB's when
4286   // CMSScavengeBeforeRemark is set, because we may have just done
4287   // a scavenge which would have filled all TLAB's -- and besides
4288   // Eden would be empty. This however may not always be the case --
4289   // for instance although we asked for a scavenge, it may not have


4301   if (CMSPrintEdenSurvivorChunks) {
4302     print_eden_and_survivor_chunk_arrays();
4303   }
4304 
4305   {
4306 #if defined(COMPILER2) || INCLUDE_JVMCI
4307     DerivedPointerTableDeactivate dpt_deact;
4308 #endif
4309 
4310     // Note on the role of the mod union table:
4311     // Since the marker in "markFromRoots" marks concurrently with
4312     // mutators, it is possible for some reachable objects not to have been
4313     // scanned. For instance, an only reference to an object A was
4314     // placed in object B after the marker scanned B. Unless B is rescanned,
4315     // A would be collected. Such updates to references in marked objects
4316     // are detected via the mod union table which is the set of all cards
4317     // dirtied since the first checkpoint in this GC cycle and prior to
4318     // the most recent young generation GC, minus those cleaned up by the
4319     // concurrent precleaning.
4320     if (CMSParallelRemarkEnabled) {
4321       GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
4322       do_remark_parallel();
4323     } else {
4324       GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, _gc_timer_cm);
4325       do_remark_non_parallel();
4326     }
4327   }
4328   verify_work_stacks_empty();
4329   verify_overflow_empty();
4330 
4331   {
4332     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
4333     refProcessingWork();
4334   }
4335   verify_work_stacks_empty();
4336   verify_overflow_empty();
4337 
4338   if (should_unload_classes()) {
4339     CodeCache::gc_epilogue();
4340   }
4341   JvmtiExport::gc_epilogue();
4342 
4343   // If we encountered any (marking stack / work queue) overflow
4344   // events during the current CMS cycle, take appropriate
4345   // remedial measures, where possible, so as to try and avoid
4346   // recurrence of that condition.
4347   assert(_markStack.isEmpty(), "No grey objects");
4348   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4349                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4350   if (ser_ovflw > 0) {
4351     if (PrintCMSStatistics != 0) {
4352       gclog_or_tty->print_cr("Marking stack overflow (benign) "


5087   // as a result of work_q overflow
5088   restore_preserved_marks_if_any();
5089 }
5090 
5091 // Non-parallel version of remark
5092 void CMSCollector::do_remark_non_parallel() {
5093   ResourceMark rm;
5094   HandleMark   hm;
5095   GenCollectedHeap* gch = GenCollectedHeap::heap();
5096   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5097 
5098   MarkRefsIntoAndScanClosure
5099     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5100              &_markStack, this,
5101              false /* should_yield */, false /* not precleaning */);
5102   MarkFromDirtyCardsClosure
5103     markFromDirtyCardsClosure(this, _span,
5104                               NULL,  // space is set further below
5105                               &_markBitMap, &_markStack, &mrias_cl);
5106   {
5107     GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
5108     // Iterate over the dirty cards, setting the corresponding bits in the
5109     // mod union table.
5110     {
5111       ModUnionClosure modUnionClosure(&_modUnionTable);
5112       _ct->ct_bs()->dirty_card_iterate(
5113                       _cmsGen->used_region(),
5114                       &modUnionClosure);
5115     }
5116     // Having transferred these marks into the modUnionTable, we just need
5117     // to rescan the marked objects on the dirty cards in the modUnionTable.
5118     // The initial marking may have been done during an asynchronous
5119     // collection so there may be dirty bits in the mod-union table.
5120     const int alignment =
5121       CardTableModRefBS::card_size * BitsPerWord;
5122     {
5123       // ... First handle dirty cards in CMS gen
5124       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5125       MemRegion ur = _cmsGen->used_region();
5126       HeapWord* lb = ur.start();
5127       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5128       MemRegion cms_span(lb, ub);
5129       _modUnionTable.dirty_range_iterate_clear(cms_span,
5130                                                &markFromDirtyCardsClosure);
5131       verify_work_stacks_empty();
5132       if (PrintCMSStatistics != 0) {
5133         gclog_or_tty->print(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ",
5134           markFromDirtyCardsClosure.num_dirty_cards());
5135       }
5136     }
5137   }
5138   if (VerifyDuringGC &&
5139       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5140     HandleMark hm;  // Discard invalid handles created during verification
5141     Universe::verify();
5142   }
5143   {
5144     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
5145 
5146     verify_work_stacks_empty();
5147 
5148     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5149     StrongRootsScope srs(1);
5150 
5151     gch->gen_process_roots(&srs,
5152                            GenCollectedHeap::OldGen,
5153                            true,  // young gen as roots
5154                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
5155                            should_unload_classes(),
5156                            &mrias_cl,
5157                            NULL,
5158                            NULL); // The dirty klasses will be handled below
5159 
5160     assert(should_unload_classes()
5161            || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5162            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5163   }
5164 
5165   {
5166     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
5167 
5168     verify_work_stacks_empty();
5169 
5170     // Scan all class loader data objects that might have been introduced
5171     // during concurrent marking.
5172     ResourceMark rm;
5173     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5174     for (int i = 0; i < array->length(); i++) {
5175       mrias_cl.do_cld_nv(array->at(i));
5176     }
5177 
5178     // We don't need to keep track of new CLDs anymore.
5179     ClassLoaderDataGraph::remember_new_clds(false);
5180 
5181     verify_work_stacks_empty();
5182   }
5183 
5184   {
5185     GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
5186 
5187     verify_work_stacks_empty();
5188 
5189     RemarkKlassClosure remark_klass_closure(&mrias_cl);
5190     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5191 
5192     verify_work_stacks_empty();
5193   }
5194 
5195   // We might have added oops to ClassLoaderData::_handles during the
5196   // concurrent marking phase. These oops point to newly allocated objects
5197   // that are guaranteed to be kept alive. Either by the direct allocation
5198   // code, or when the young collector processes the roots. Hence,
5199   // we don't have to revisit the _handles block during the remark phase.
5200 
5201   verify_work_stacks_empty();
5202   // Restore evacuated mark words, if any, used for overflow list links
5203   restore_preserved_marks_if_any();
5204 
5205   verify_overflow_empty();


5373   workers->run_task(&enq_task);
5374 }
5375 
5376 void CMSCollector::refProcessingWork() {
5377   ResourceMark rm;
5378   HandleMark   hm;
5379 
5380   ReferenceProcessor* rp = ref_processor();
5381   assert(rp->span().equals(_span), "Spans should be equal");
5382   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5383   // Process weak references.
5384   rp->setup_policy(false);
5385   verify_work_stacks_empty();
5386 
5387   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5388                                           &_markStack, false /* !preclean */);
5389   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5390                                 _span, &_markBitMap, &_markStack,
5391                                 &cmsKeepAliveClosure, false /* !preclean */);
5392   {
5393     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
5394 
5395     ReferenceProcessorStats stats;
5396     if (rp->processing_is_mt()) {
5397       // Set the degree of MT here.  If the discovery is done MT, there
5398       // may have been a different number of threads doing the discovery
5399       // and a different number of discovered lists may have Ref objects.
5400       // That is OK as long as the Reference lists are balanced (see
5401       // balance_all_queues() and balance_queues()).
5402       GenCollectedHeap* gch = GenCollectedHeap::heap();
5403       uint active_workers = ParallelGCThreads;
5404       WorkGang* workers = gch->workers();
5405       if (workers != NULL) {
5406         active_workers = workers->active_workers();
5407         // The expectation is that active_workers will have already
5408         // been set to a reasonable value.  If it has not been set,
5409         // investigate.
5410         assert(active_workers > 0, "Should have been set during scavenge");
5411       }
5412       rp->set_active_mt_degree(active_workers);
5413       CMSRefProcTaskExecutor task_executor(*this);


5415                                         &cmsKeepAliveClosure,
5416                                         &cmsDrainMarkingStackClosure,
5417                                         &task_executor,
5418                                         _gc_timer_cm);
5419     } else {
5420       stats = rp->process_discovered_references(&_is_alive_closure,
5421                                         &cmsKeepAliveClosure,
5422                                         &cmsDrainMarkingStackClosure,
5423                                         NULL,
5424                                         _gc_timer_cm);
5425     }
5426     _gc_tracer_cm->report_gc_reference_stats(stats);
5427 
5428   }
5429 
5430   // This is the point where the entire marking should have completed.
5431   verify_work_stacks_empty();
5432 
5433   if (should_unload_classes()) {
5434     {
5435       GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
5436 
5437       // Unload classes and purge the SystemDictionary.
5438       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5439 
5440       // Unload nmethods.
5441       CodeCache::do_unloading(&_is_alive_closure, purged_class);
5442 
5443       // Prune dead klasses from subklass/sibling/implementor lists.
5444       Klass::clean_weak_klass_links(&_is_alive_closure);
5445     }
5446 
5447     {
5448       GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
5449       // Clean up unreferenced symbols in symbol table.
5450       SymbolTable::unlink();
5451     }
5452 
5453     {
5454       GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
5455       // Delete entries for dead interned strings.
5456       StringTable::unlink(&_is_alive_closure);
5457     }
5458   }
5459 
5460 
5461   // Restore any preserved marks as a result of mark stack or
5462   // work queue overflow
5463   restore_preserved_marks_if_any();  // done single-threaded for now
5464 
5465   rp->set_enqueuing_is_done(true);
5466   if (rp->processing_is_mt()) {
5467     rp->balance_all_queues();
5468     CMSRefProcTaskExecutor task_executor(*this);
5469     rp->enqueue_discovered_references(&task_executor);
5470   } else {
5471     rp->enqueue_discovered_references(NULL);
5472   }
5473   rp->verify_no_references_recorded();
5474   assert(!rp->discovery_enabled(), "should have been disabled");


5501     }
5502   }
5503 }
5504 #endif
5505 
5506 void CMSCollector::sweep() {
5507   assert(_collectorState == Sweeping, "just checking");
5508   check_correct_thread_executing();
5509   verify_work_stacks_empty();
5510   verify_overflow_empty();
5511   increment_sweep_count();
5512   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
5513 
5514   _inter_sweep_timer.stop();
5515   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5516 
5517   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5518   _intra_sweep_timer.reset();
5519   _intra_sweep_timer.start();
5520   {
5521     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5522     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5523     // First sweep the old gen
5524     {
5525       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5526                                bitMapLock());
5527       sweepWork(_cmsGen);
5528     }
5529 
5530     // Update Universe::_heap_*_at_gc figures.
5531     // We need all the free list locks to make the abstract state
5532     // transition from Sweeping to Resetting. See detailed note
5533     // further below.
5534     {
5535       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5536       // Update heap occupancy information which is used as
5537       // input to soft ref clearing policy at the next gc.
5538       Universe::update_heap_info_at_gc();
5539       _collectorState = Resizing;
5540     }
5541   }


5685   } else {                                      // did not unload classes,
5686     _concurrent_cycles_since_last_unload++;     // ... increment count
5687   }
5688 }
5689 
5690 // Reset CMS data structures (for now just the marking bit map)
5691 // preparatory for the next cycle.
5692 void CMSCollector::reset_concurrent() {
5693   CMSTokenSyncWithLocks ts(true, bitMapLock());
5694 
5695   // If the state is not "Resetting", the foreground  thread
5696   // has done a collection and the resetting.
5697   if (_collectorState != Resetting) {
5698     assert(_collectorState == Idling, "The state should only change"
5699       " because the foreground collector has finished the collection");
5700     return;
5701   }
5702 
5703   // Clear the mark bitmap (no grey objects to start with)
5704   // for the next cycle.
5705   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5706   CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
5707 
5708   HeapWord* curAddr = _markBitMap.startWord();
5709   while (curAddr < _markBitMap.endWord()) {
5710     size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5711     MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5712     _markBitMap.clear_large_range(chunk);
5713     if (ConcurrentMarkSweepThread::should_yield() &&
5714         !foregroundGCIsActive() &&
5715         CMSYield) {
5716       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5717              "CMS thread should hold CMS token");
5718       assert_lock_strong(bitMapLock());
5719       bitMapLock()->unlock();
5720       ConcurrentMarkSweepThread::desynchronize(true);
5721       stopTimer();
5722       if (PrintCMSStatistics != 0) {
5723         incrementYields();
5724       }
5725 


5741   // are being measured for gc overhead limits, clean the "near" flag
5742   // and count.
5743   size_policy()->reset_gc_overhead_limit_count();
5744   _collectorState = Idling;
5745 
5746   register_gc_end();
5747 }
5748 
// Same as above but for STW paths: invoked by the foreground collector
// with the bitmap lock already held, so the whole bitmap is cleared in
// one shot instead of the chunked, yielding clear done by the
// concurrent variant above.
void CMSCollector::reset_stw() {
  // already have the lock
  assert(_collectorState == Resetting, "just checking");
  assert_lock_strong(bitMapLock());
  // Tag this work with the concurrent cycle's GC id (restored on scope exit).
  GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
  // Clear every mark bit so the next cycle starts with no grey objects.
  _markBitMap.clear_all();
  _collectorState = Idling;
  register_gc_end();
}
5759 
5760 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5761   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5762   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
5763   TraceCollectorStats tcs(counters());
5764 
5765   switch (op) {
5766     case CMS_op_checkpointRootsInitial: {

5767       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5768       checkpointRootsInitial();
5769       if (PrintGC) {
5770         _cmsGen->printOccupancy("initial-mark");
5771       }
5772       break;
5773     }
5774     case CMS_op_checkpointRootsFinal: {

5775       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5776       checkpointRootsFinal();
5777       if (PrintGC) {
5778         _cmsGen->printOccupancy("remark");
5779       }
5780       break;
5781     }
5782     default:
5783       fatal("No such CMS_op");
5784   }
5785 }
5786 
5787 #ifndef PRODUCT
5788 size_t const CMSCollector::skip_header_HeapWords() {
5789   return FreeChunk::header_size();
5790 }
5791 
5792 // Try and collect here conditions that should hold when
5793 // CMS thread is exiting. The idea is that the foreground GC
5794 // thread should not be blocked if it wants to terminate




  36 #include "gc/cms/vmCMSOperations.hpp"
  37 #include "gc/serial/genMarkSweep.hpp"
  38 #include "gc/serial/tenuredGeneration.hpp"
  39 #include "gc/shared/adaptiveSizePolicy.hpp"
  40 #include "gc/shared/cardGeneration.inline.hpp"
  41 #include "gc/shared/cardTableRS.hpp"
  42 #include "gc/shared/collectedHeap.inline.hpp"
  43 #include "gc/shared/collectorCounters.hpp"
  44 #include "gc/shared/collectorPolicy.hpp"
  45 #include "gc/shared/gcLocker.inline.hpp"
  46 #include "gc/shared/gcPolicyCounters.hpp"
  47 #include "gc/shared/gcTimer.hpp"
  48 #include "gc/shared/gcTrace.hpp"
  49 #include "gc/shared/gcTraceTime.hpp"
  50 #include "gc/shared/genCollectedHeap.hpp"
  51 #include "gc/shared/genOopClosures.inline.hpp"
  52 #include "gc/shared/isGCActiveMark.hpp"
  53 #include "gc/shared/referencePolicy.hpp"
  54 #include "gc/shared/strongRootsScope.hpp"
  55 #include "gc/shared/taskqueue.inline.hpp"
  56 #include "logging/log.hpp"
  57 #include "memory/allocation.hpp"
  58 #include "memory/iterator.inline.hpp"
  59 #include "memory/padded.hpp"
  60 #include "memory/resourceArea.hpp"
  61 #include "oops/oop.inline.hpp"
  62 #include "prims/jvmtiExport.hpp"
  63 #include "runtime/atomic.inline.hpp"
  64 #include "runtime/globals_extension.hpp"
  65 #include "runtime/handles.inline.hpp"
  66 #include "runtime/java.hpp"
  67 #include "runtime/orderAccess.inline.hpp"
  68 #include "runtime/vmThread.hpp"
  69 #include "services/memoryService.hpp"
  70 #include "services/runtimeService.hpp"
  71 #include "utilities/stack.inline.hpp"
  72 
// statics
// Collector instance shared by the CMS generation objects.
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
// Pending full-collection request flag and its cause — presumably set by
// the explicit-GC request path and consumed by the CMS background thread;
// confirm against collect()/collect_in_background().
bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;


1564 // after obtaining the free list locks for the
1565 // two generations.
1566 void CMSCollector::compute_new_size() {
1567   assert_locked_or_safepoint(Heap_lock);
1568   FreelistLocker z(this);
1569   MetaspaceGC::compute_new_size();
1570   _cmsGen->compute_new_size_free_list();
1571 }
1572 
1573 // A work method used by the foreground collector to do
1574 // a mark-sweep-compact.
1575 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1576   GenCollectedHeap* gch = GenCollectedHeap::heap();
1577 
1578   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1579   gc_timer->register_gc_start();
1580 
1581   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1582   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1583 
1584   GCTraceTime(Trace, gc) t("CMS:MSC");
1585 
1586   // Temporarily widen the span of the weak reference processing to
1587   // the entire heap.
1588   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1589   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1590   // Temporarily, clear the "is_alive_non_header" field of the
1591   // reference processor.
1592   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1593   // Temporarily make reference _processing_ single threaded (non-MT).
1594   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1595   // Temporarily make refs discovery atomic
1596   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1597   // Temporarily make reference _discovery_ single threaded (non-MT)
1598   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1599 
1600   ref_processor()->set_enqueuing_is_done(false);
1601   ref_processor()->enable_discovery();
1602   ref_processor()->setup_policy(clear_all_soft_refs);
1603   // If an asynchronous collection finishes, the _modUnionTable is
1604   // all clear.  If we are assuming the collection from an asynchronous


1947   // Should this be in gc_epilogue?
1948   collector_policy()->counters()->update_counters();
1949 
1950   {
1951     // Clear _foregroundGCShouldWait and, in the event that the
1952     // foreground collector is waiting, notify it, before
1953     // returning.
1954     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1955     _foregroundGCShouldWait = false;
1956     if (_foregroundGCIsActive) {
1957       CGC_lock->notify();
1958     }
1959     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1960            "Possible deadlock");
1961   }
1962   if (TraceCMSState) {
1963     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1964       " exiting collection CMS state %d",
1965       p2i(Thread::current()), _collectorState);
1966   }
1967   log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
1968                      prev_used / K, _cmsGen->used()/K, _cmsGen->capacity() /K);

1969 }
1970 
// Record the start of a CMS cycle with the timer and tracer, and remember
// that we did so, so register_gc_end() reports the end at most once.
void CMSCollector::register_gc_start(GCCause::Cause cause) {
  _cms_start_registered = true;
  _gc_timer_cm->register_gc_start();
  _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
}
1976 
1977 void CMSCollector::register_gc_end() {
1978   if (_cms_start_registered) {
1979     report_heap_summary(GCWhen::AfterGC);
1980 
1981     _gc_timer_cm->register_gc_end();
1982     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1983     _cms_start_registered = false;
1984   }
1985 }
1986 
1987 void CMSCollector::save_heap_summary() {
1988   GenCollectedHeap* gch = GenCollectedHeap::heap();


2302 class VerifyMarkedClosure: public BitMapClosure {
2303   CMSBitMap* _marks;
2304   bool       _failed;
2305 
2306  public:
2307   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2308 
2309   bool do_bit(size_t offset) {
2310     HeapWord* addr = _marks->offsetToHeapWord(offset);
2311     if (!_marks->isMarked(addr)) {
2312       oop(addr)->print_on(gclog_or_tty);
2313       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2314       _failed = true;
2315     }
2316     return true;
2317   }
2318 
2319   bool failed() { return _failed; }
2320 };
2321 
2322 bool CMSCollector::verify_after_remark() {
2323   GCTraceTime(Info, gc, verify) tm("Verifying CMS Marking.");
2324   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2325   static bool init = false;
2326 
2327   assert(SafepointSynchronize::is_at_safepoint(),
2328          "Else mutations in object graph will make answer suspect");
2329   assert(have_cms_token(),
2330          "Else there may be mutual interference in use of "
2331          " verification data structures");
2332   assert(_collectorState > Marking && _collectorState <= Sweeping,
2333          "Else marking info checked here may be obsolete");
2334   assert(haveFreelistLocks(), "must hold free list locks");
2335   assert_lock_strong(bitMapLock());
2336 
2337 
2338   // Allocate marking bit map if not already allocated
2339   if (!init) { // first time
2340     if (!verification_mark_bm()->allocate(_span)) {
2341       return false;
2342     }
2343     init = true;


2366   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2367   // Update the saved marks which may affect the root scans.
2368   gch->save_marks();
2369 
2370   if (CMSRemarkVerifyVariant == 1) {
2371     // In this first variant of verification, we complete
2372     // all marking, then check if the new marks-vector is
2373     // a subset of the CMS marks-vector.
2374     verify_after_remark_work_1();
2375   } else if (CMSRemarkVerifyVariant == 2) {
2376     // In this second variant of verification, we flag an error
2377     // (i.e. an object reachable in the new marks-vector not reachable
2378     // in the CMS marks-vector) immediately, also indicating the
2379     // identify of an object (A) that references the unmarked object (B) --
2380     // presumably, a mutation to A failed to be picked up by preclean/remark?
2381     verify_after_remark_work_2();
2382   } else {
2383     warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant",
2384             CMSRemarkVerifyVariant);
2385   }

2386   return true;
2387 }
2388 
2389 void CMSCollector::verify_after_remark_work_1() {
2390   ResourceMark rm;
2391   HandleMark  hm;
2392   GenCollectedHeap* gch = GenCollectedHeap::heap();
2393 
2394   // Get a clear set of claim bits for the roots processing to work with.
2395   ClassLoaderDataGraph::clear_claimed_marks();
2396 
2397   // Mark from roots one level into CMS
2398   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2399   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2400 
2401   {
2402     StrongRootsScope srs(1);
2403 
2404     gch->gen_process_roots(&srs,
2405                            GenCollectedHeap::OldGen,


2824  public:
2825   // Not MT-safe; so do not pass around these StackObj's
2826   // where they may be accessed by other threads.
2827   jlong wallclock_millis() {
2828     assert(_wallclock.is_active(), "Wall clock should not stop");
2829     _wallclock.stop();  // to record time
2830     jlong ret = _wallclock.milliseconds();
2831     _wallclock.start(); // restart
2832     return ret;
2833   }
2834 };
2835 
// Scoped accounting for one concurrent CMS phase: logs the phase start,
// resets the yield count and collector CPU timer, and starts the
// wall-clock timer. The matching report is emitted by the destructor.
CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
                                       const char *phase,
                                       bool print_cr) :
  _collector(collector), _phase(phase), _print_cr(print_cr) {

  // Count yields afresh for this phase (reported by the destructor when
  // PrintCMSStatistics is on).
  if (PrintCMSStatistics != 0) {
    _collector->resetYields();
  }
  log_info(gc)("%s-concurrent-%s-start", _collector->cmsGen()->short_name(), _phase);
  // Reset/start the collector's CPU timer and our wall-clock timer; both
  // are stopped and reported in ~CMSPhaseAccounting.
  _collector->resetTimer();
  _wallclock.start();
  _collector->startTimer();
}
2849 
// End-of-phase report: stops the timers started by the constructor and
// logs "<cpu-secs>/<wall-secs>" for the phase, plus the yield count when
// PrintCMSStatistics is enabled.
CMSPhaseAccounting::~CMSPhaseAccounting() {
  assert(_wallclock.is_active(), "Wall clock should not have stopped");
  // Stop both timers before reading their values.
  _collector->stopTimer();
  _wallclock.stop();
  log_info(gc)("%s-concurrent-%s: %3.3f/%3.3f secs",
               _collector->cmsGen()->short_name(),
               _phase, _collector->timerValue(), _wallclock.seconds());
  if (PrintCMSStatistics != 0) {
    gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
                  _collector->yields());
  }
}
2862 
2863 // CMS work
2864 
// The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
class CMSParMarkTask : public AbstractGangTask {
 protected:
  CMSCollector*     _collector;   // owning collector, shared by all workers
  uint              _n_workers;   // number of parallel GC worker threads
  CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
      AbstractGangTask(name),
      _collector(collector),
      _n_workers(n_workers) {}
  // Work method in support of parallel rescan ... of young gen spaces;
  // rescans one chunked space using the per-worker chunk boundaries.
  void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
                             ContiguousSpace* space,
                             HeapWord** chunk_array, size_t chunk_top);
  // Applies cl to the young-gen root regions claimed by worker_id.
  void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
};
2880 


2907                     Mutex::_no_safepoint_check_flag);
2908     checkpointRootsInitialWork();
2909     // enable ("weak") refs discovery
2910     rp->enable_discovery();
2911     _collectorState = Marking;
2912   }
2913 }
2914 
2915 void CMSCollector::checkpointRootsInitialWork() {
2916   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2917   assert(_collectorState == InitialMarking, "just checking");
2918 
2919   // Already have locks.
2920   assert_lock_strong(bitMapLock());
2921   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2922 
2923   // Setup the verification and class unloading state for this
2924   // CMS collection cycle.
2925   setup_cms_unloading_and_verification_state();
2926 
2927   GCTraceTime(Trace, gc) ts("checkpointRootsInitialWork", _gc_timer_cm);

2928 
2929   // Reset all the PLAB chunk arrays if necessary.
2930   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2931     reset_survivor_plab_arrays();
2932   }
2933 
2934   ResourceMark rm;
2935   HandleMark  hm;
2936 
2937   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2938   GenCollectedHeap* gch = GenCollectedHeap::heap();
2939 
2940   verify_work_stacks_empty();
2941   verify_overflow_empty();
2942 
2943   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2944   // Update the saved marks which may affect the root scans.
2945   gch->save_marks();
2946 
2947   // weak reference processing has not started yet.


3011   save_sweep_limits();
3012   verify_overflow_empty();
3013 }
3014 
3015 bool CMSCollector::markFromRoots() {
3016   // we might be tempted to assert that:
3017   // assert(!SafepointSynchronize::is_at_safepoint(),
3018   //        "inconsistent argument?");
3019   // However that wouldn't be right, because it's possible that
3020   // a safepoint is indeed in progress as a young generation
3021   // stop-the-world GC happens even as we mark in this generation.
3022   assert(_collectorState == Marking, "inconsistent state?");
3023   check_correct_thread_executing();
3024   verify_overflow_empty();
3025 
3026   // Weak ref discovery note: We may be discovering weak
3027   // refs in this generation concurrent (but interleaved) with
3028   // weak ref discovery by the young generation collector.
3029 
3030   CMSTokenSyncWithLocks ts(true, bitMapLock());
3031   GCTraceCPUTime tcpu;
3032   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3033   bool res = markFromRootsWork();
3034   if (res) {
3035     _collectorState = Precleaning;
3036   } else { // We failed and a foreground collection wants to take over
3037     assert(_foregroundGCIsActive, "internal state inconsistency");
3038     assert(_restart_addr == NULL,  "foreground will restart from scratch");
3039     if (PrintGCDetails) {
3040       gclog_or_tty->print_cr("bailing out to foreground collection");
3041     }
3042   }
3043   verify_overflow_empty();
3044   return res;
3045 }
3046 
3047 bool CMSCollector::markFromRootsWork() {
3048   // iterate over marked bits in bit map, doing a full scan and mark
3049   // from these roots using the following algorithm:
3050   // . if oop is to the right of the current scan pointer,
3051   //   mark corresponding bit (we'll process it later)


3708 void CMSCollector::preclean() {
3709   check_correct_thread_executing();
3710   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3711   verify_work_stacks_empty();
3712   verify_overflow_empty();
3713   _abort_preclean = false;
3714   if (CMSPrecleaningEnabled) {
3715     if (!CMSEdenChunksRecordAlways) {
3716       _eden_chunk_index = 0;
3717     }
3718     size_t used = get_eden_used();
3719     size_t capacity = get_eden_capacity();
3720     // Don't start sampling unless we will get sufficiently
3721     // many samples.
3722     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
3723                 * CMSScheduleRemarkEdenPenetration)) {
3724       _start_sampling = true;
3725     } else {
3726       _start_sampling = false;
3727     }
3728     GCTraceCPUTime tcpu;
3729     CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
3730     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3731   }
3732   CMSTokenSync x(true); // is cms thread
3733   if (CMSPrecleaningEnabled) {
3734     sample_eden();
3735     _collectorState = AbortablePreclean;
3736   } else {
3737     _collectorState = FinalMarking;
3738   }
3739   verify_work_stacks_empty();
3740   verify_overflow_empty();
3741 }
3742 
3743 // Try and schedule the remark such that young gen
3744 // occupancy is CMSScheduleRemarkEdenPenetration %.
3745 void CMSCollector::abortable_preclean() {
3746   check_correct_thread_executing();
3747   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3748   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3749 
3750   // If Eden's current occupancy is below this threshold,
3751   // immediately schedule the remark; else preclean
3752   // past the next scavenge in an effort to
3753   // schedule the pause as described above. By choosing
3754   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3755   // we will never do an actual abortable preclean cycle.
3756   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3757     GCTraceCPUTime tcpu;
3758     CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
3759     // We need more smarts in the abortable preclean
3760     // loop below to deal with cases where allocation
3761     // in young gen is very very slow, and our precleaning
3762     // is running a losing race against a horde of
3763     // mutators intent on flooding us with CMS updates
3764     // (dirty cards).
3765     // One, admittedly dumb, strategy is to give up
3766     // after a certain number of abortable precleaning loops
3767     // or after a certain maximum time. We want to make
3768     // this smarter in the next iteration.
3769     // XXX FIX ME!!! YSR
3770     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3771     while (!(should_abort_preclean() ||
3772              ConcurrentMarkSweepThread::should_terminate())) {
3773       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3774       cumworkdone += workdone;
3775       loops++;
3776       // Voluntarily terminate abortable preclean phase if we have
3777       // been at it for too long.


4218   check_correct_thread_executing();
4219   // world is stopped at this checkpoint
4220   assert(SafepointSynchronize::is_at_safepoint(),
4221          "world should be stopped");
4222   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
4223 
4224   verify_work_stacks_empty();
4225   verify_overflow_empty();
4226 
4227   if (PrintGCDetails) {
4228     gclog_or_tty->print("[YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)]",
4229                         _young_gen->used() / K,
4230                         _young_gen->capacity() / K);
4231   }
4232   {
4233     if (CMSScavengeBeforeRemark) {
4234       GenCollectedHeap* gch = GenCollectedHeap::heap();
4235       // Temporarily set flag to false, GCH->do_collection will
4236       // expect it to be false and set to true
4237       FlagSetting fl(gch->_is_gc_active, false);
4238 
4239       GCTraceTime(Trace, gc) tm("Scavenge-Before-Remark", _gc_timer_cm);
4240 
4241       gch->do_collection(true,                      // full (i.e. force, see below)
4242                          false,                     // !clear_all_soft_refs
4243                          0,                         // size
4244                          false,                     // is_tlab
4245                          GenCollectedHeap::YoungGen // type
4246         );
4247     }
4248     FreelistLocker x(this);
4249     MutexLockerEx y(bitMapLock(),
4250                     Mutex::_no_safepoint_check_flag);
4251     checkpointRootsFinalWork();
4252   }
4253   verify_work_stacks_empty();
4254   verify_overflow_empty();
4255 }
4256 
4257 void CMSCollector::checkpointRootsFinalWork() {
4258   GCTraceTime(Trace, gc) tm("checkpointRootsFinalWork", _gc_timer_cm);
4259 
4260   assert(haveFreelistLocks(), "must have free list locks");
4261   assert_lock_strong(bitMapLock());
4262 
4263   ResourceMark rm;
4264   HandleMark   hm;
4265 
4266   GenCollectedHeap* gch = GenCollectedHeap::heap();
4267 
4268   if (should_unload_classes()) {
4269     CodeCache::gc_prologue();
4270   }
4271   assert(haveFreelistLocks(), "must have free list locks");
4272   assert_lock_strong(bitMapLock());
4273 
4274   // We might assume that we need not fill TLAB's when
4275   // CMSScavengeBeforeRemark is set, because we may have just done
4276   // a scavenge which would have filled all TLAB's -- and besides
4277   // Eden would be empty. This however may not always be the case --
4278   // for instance although we asked for a scavenge, it may not have


4290   if (CMSPrintEdenSurvivorChunks) {
4291     print_eden_and_survivor_chunk_arrays();
4292   }
4293 
4294   {
4295 #if defined(COMPILER2) || INCLUDE_JVMCI
4296     DerivedPointerTableDeactivate dpt_deact;
4297 #endif
4298 
4299     // Note on the role of the mod union table:
4300     // Since the marker in "markFromRoots" marks concurrently with
4301     // mutators, it is possible for some reachable objects not to have been
4302     // scanned. For instance, an only reference to an object A was
4303     // placed in object B after the marker scanned B. Unless B is rescanned,
4304     // A would be collected. Such updates to references in marked objects
4305     // are detected via the mod union table which is the set of all cards
4306     // dirtied since the first checkpoint in this GC cycle and prior to
4307     // the most recent young generation GC, minus those cleaned up by the
4308     // concurrent precleaning.
4309     if (CMSParallelRemarkEnabled) {
4310       GCTraceTime(Debug, gc) t("Rescan (parallel)", _gc_timer_cm);
4311       do_remark_parallel();
4312     } else {
4313       GCTraceTime(Debug, gc) t("Rescan (non-parallel)", _gc_timer_cm);
4314       do_remark_non_parallel();
4315     }
4316   }
4317   verify_work_stacks_empty();
4318   verify_overflow_empty();
4319 
4320   {
4321     GCTraceTime(Trace, gc) ts("refProcessingWork", _gc_timer_cm);
4322     refProcessingWork();
4323   }
4324   verify_work_stacks_empty();
4325   verify_overflow_empty();
4326 
4327   if (should_unload_classes()) {
4328     CodeCache::gc_epilogue();
4329   }
4330   JvmtiExport::gc_epilogue();
4331 
4332   // If we encountered any (marking stack / work queue) overflow
4333   // events during the current CMS cycle, take appropriate
4334   // remedial measures, where possible, so as to try and avoid
4335   // recurrence of that condition.
4336   assert(_markStack.isEmpty(), "No grey objects");
4337   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4338                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4339   if (ser_ovflw > 0) {
4340     if (PrintCMSStatistics != 0) {
4341       gclog_or_tty->print_cr("Marking stack overflow (benign) "


5076   // as a result of work_q overflow
5077   restore_preserved_marks_if_any();
5078 }
5079 
5080 // Non-parallel version of remark
5081 void CMSCollector::do_remark_non_parallel() {
5082   ResourceMark rm;
5083   HandleMark   hm;
5084   GenCollectedHeap* gch = GenCollectedHeap::heap();
5085   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5086 
5087   MarkRefsIntoAndScanClosure
5088     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5089              &_markStack, this,
5090              false /* should_yield */, false /* not precleaning */);
5091   MarkFromDirtyCardsClosure
5092     markFromDirtyCardsClosure(this, _span,
5093                               NULL,  // space is set further below
5094                               &_markBitMap, &_markStack, &mrias_cl);
5095   {
5096     GCTraceTime(Trace, gc) t("grey object rescan", _gc_timer_cm);
5097     // Iterate over the dirty cards, setting the corresponding bits in the
5098     // mod union table.
5099     {
5100       ModUnionClosure modUnionClosure(&_modUnionTable);
5101       _ct->ct_bs()->dirty_card_iterate(
5102                       _cmsGen->used_region(),
5103                       &modUnionClosure);
5104     }
5105     // Having transferred these marks into the modUnionTable, we just need
5106     // to rescan the marked objects on the dirty cards in the modUnionTable.
5107     // The initial marking may have been done during an asynchronous
5108     // collection so there may be dirty bits in the mod-union table.
5109     const int alignment =
5110       CardTableModRefBS::card_size * BitsPerWord;
5111     {
5112       // ... First handle dirty cards in CMS gen
5113       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5114       MemRegion ur = _cmsGen->used_region();
5115       HeapWord* lb = ur.start();
5116       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5117       MemRegion cms_span(lb, ub);
5118       _modUnionTable.dirty_range_iterate_clear(cms_span,
5119                                                &markFromDirtyCardsClosure);
5120       verify_work_stacks_empty();
5121       if (PrintCMSStatistics != 0) {
5122         gclog_or_tty->print(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ",
5123           markFromDirtyCardsClosure.num_dirty_cards());
5124       }
5125     }
5126   }
5127   if (VerifyDuringGC &&
5128       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5129     HandleMark hm;  // Discard invalid handles created during verification
5130     Universe::verify();
5131   }
5132   {
5133     GCTraceTime(Trace, gc) t("root rescan", _gc_timer_cm);
5134 
5135     verify_work_stacks_empty();
5136 
5137     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5138     StrongRootsScope srs(1);
5139 
5140     gch->gen_process_roots(&srs,
5141                            GenCollectedHeap::OldGen,
5142                            true,  // young gen as roots
5143                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
5144                            should_unload_classes(),
5145                            &mrias_cl,
5146                            NULL,
5147                            NULL); // The dirty klasses will be handled below
5148 
5149     assert(should_unload_classes()
5150            || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5151            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5152   }
5153 
5154   {
5155     GCTraceTime(Trace, gc) t("visit unhandled CLDs", _gc_timer_cm);
5156 
5157     verify_work_stacks_empty();
5158 
5159     // Scan all class loader data objects that might have been introduced
5160     // during concurrent marking.
5161     ResourceMark rm;
5162     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5163     for (int i = 0; i < array->length(); i++) {
5164       mrias_cl.do_cld_nv(array->at(i));
5165     }
5166 
5167     // We don't need to keep track of new CLDs anymore.
5168     ClassLoaderDataGraph::remember_new_clds(false);
5169 
5170     verify_work_stacks_empty();
5171   }
5172 
5173   {
5174     GCTraceTime(Trace, gc) t("dirty klass scan", _gc_timer_cm);
5175 
5176     verify_work_stacks_empty();
5177 
5178     RemarkKlassClosure remark_klass_closure(&mrias_cl);
5179     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5180 
5181     verify_work_stacks_empty();
5182   }
5183 
5184   // We might have added oops to ClassLoaderData::_handles during the
5185   // concurrent marking phase. These oops point to newly allocated objects
5186   // that are guaranteed to be kept alive. Either by the direct allocation
5187   // code, or when the young collector processes the roots. Hence,
5188   // we don't have to revisit the _handles block during the remark phase.
5189 
5190   verify_work_stacks_empty();
5191   // Restore evacuated mark words, if any, used for overflow list links
5192   restore_preserved_marks_if_any();
5193 
5194   verify_overflow_empty();


5362   workers->run_task(&enq_task);
5363 }
5364 
5365 void CMSCollector::refProcessingWork() {
5366   ResourceMark rm;
5367   HandleMark   hm;
5368 
5369   ReferenceProcessor* rp = ref_processor();
5370   assert(rp->span().equals(_span), "Spans should be equal");
5371   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5372   // Process weak references.
5373   rp->setup_policy(false);
5374   verify_work_stacks_empty();
5375 
5376   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5377                                           &_markStack, false /* !preclean */);
5378   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5379                                 _span, &_markBitMap, &_markStack,
5380                                 &cmsKeepAliveClosure, false /* !preclean */);
5381   {
5382     GCTraceTime(Debug, gc) t("weak refs processing", _gc_timer_cm);
5383 
5384     ReferenceProcessorStats stats;
5385     if (rp->processing_is_mt()) {
5386       // Set the degree of MT here.  If the discovery is done MT, there
5387       // may have been a different number of threads doing the discovery
5388       // and a different number of discovered lists may have Ref objects.
5389       // That is OK as long as the Reference lists are balanced (see
5390       // balance_all_queues() and balance_queues()).
5391       GenCollectedHeap* gch = GenCollectedHeap::heap();
5392       uint active_workers = ParallelGCThreads;
5393       WorkGang* workers = gch->workers();
5394       if (workers != NULL) {
5395         active_workers = workers->active_workers();
5396         // The expectation is that active_workers will have already
5397         // been set to a reasonable value.  If it has not been set,
5398         // investigate.
5399         assert(active_workers > 0, "Should have been set during scavenge");
5400       }
5401       rp->set_active_mt_degree(active_workers);
5402       CMSRefProcTaskExecutor task_executor(*this);


5404                                         &cmsKeepAliveClosure,
5405                                         &cmsDrainMarkingStackClosure,
5406                                         &task_executor,
5407                                         _gc_timer_cm);
5408     } else {
5409       stats = rp->process_discovered_references(&_is_alive_closure,
5410                                         &cmsKeepAliveClosure,
5411                                         &cmsDrainMarkingStackClosure,
5412                                         NULL,
5413                                         _gc_timer_cm);
5414     }
5415     _gc_tracer_cm->report_gc_reference_stats(stats);
5416 
5417   }
5418 
5419   // This is the point where the entire marking should have completed.
5420   verify_work_stacks_empty();
5421 
5422   if (should_unload_classes()) {
5423     {
5424       GCTraceTime(Debug, gc) t("class unloading", _gc_timer_cm);
5425 
5426       // Unload classes and purge the SystemDictionary.
5427       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5428 
5429       // Unload nmethods.
5430       CodeCache::do_unloading(&_is_alive_closure, purged_class);
5431 
5432       // Prune dead klasses from subklass/sibling/implementor lists.
5433       Klass::clean_weak_klass_links(&_is_alive_closure);
5434     }
5435 
5436     {
5437       GCTraceTime(Debug, gc) t("scrub symbol table", _gc_timer_cm);
5438       // Clean up unreferenced symbols in symbol table.
5439       SymbolTable::unlink();
5440     }
5441 
5442     {
5443       GCTraceTime(Debug, gc) t("scrub string table", _gc_timer_cm);
5444       // Delete entries for dead interned strings.
5445       StringTable::unlink(&_is_alive_closure);
5446     }
5447   }
5448 
5449 
5450   // Restore any preserved marks as a result of mark stack or
5451   // work queue overflow
5452   restore_preserved_marks_if_any();  // done single-threaded for now
5453 
5454   rp->set_enqueuing_is_done(true);
5455   if (rp->processing_is_mt()) {
5456     rp->balance_all_queues();
5457     CMSRefProcTaskExecutor task_executor(*this);
5458     rp->enqueue_discovered_references(&task_executor);
5459   } else {
5460     rp->enqueue_discovered_references(NULL);
5461   }
5462   rp->verify_no_references_recorded();
5463   assert(!rp->discovery_enabled(), "should have been disabled");


5490     }
5491   }
5492 }
5493 #endif
5494 
5495 void CMSCollector::sweep() {
5496   assert(_collectorState == Sweeping, "just checking");
5497   check_correct_thread_executing();
5498   verify_work_stacks_empty();
5499   verify_overflow_empty();
5500   increment_sweep_count();
5501   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
5502 
5503   _inter_sweep_timer.stop();
5504   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5505 
5506   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5507   _intra_sweep_timer.reset();
5508   _intra_sweep_timer.start();
5509   {
5510     GCTraceCPUTime tcpu;
5511     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5512     // First sweep the old gen
5513     {
5514       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5515                                bitMapLock());
5516       sweepWork(_cmsGen);
5517     }
5518 
5519     // Update Universe::_heap_*_at_gc figures.
5520     // We need all the free list locks to make the abstract state
5521     // transition from Sweeping to Resetting. See detailed note
5522     // further below.
5523     {
5524       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5525       // Update heap occupancy information which is used as
5526       // input to soft ref clearing policy at the next gc.
5527       Universe::update_heap_info_at_gc();
5528       _collectorState = Resizing;
5529     }
5530   }


5674   } else {                                      // did not unload classes,
5675     _concurrent_cycles_since_last_unload++;     // ... increment count
5676   }
5677 }
5678 
5679 // Reset CMS data structures (for now just the marking bit map)
5680 // preparatory for the next cycle.
5681 void CMSCollector::reset_concurrent() {
5682   CMSTokenSyncWithLocks ts(true, bitMapLock());
5683 
5684   // If the state is not "Resetting", the foreground  thread
5685   // has done a collection and the resetting.
5686   if (_collectorState != Resetting) {
5687     assert(_collectorState == Idling, "The state should only change"
5688       " because the foreground collector has finished the collection");
5689     return;
5690   }
5691 
5692   // Clear the mark bitmap (no grey objects to start with)
5693   // for the next cycle.
5694   GCTraceCPUTime tcpu;
5695   CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
5696 
5697   HeapWord* curAddr = _markBitMap.startWord();
5698   while (curAddr < _markBitMap.endWord()) {
5699     size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5700     MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5701     _markBitMap.clear_large_range(chunk);
5702     if (ConcurrentMarkSweepThread::should_yield() &&
5703         !foregroundGCIsActive() &&
5704         CMSYield) {
5705       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5706              "CMS thread should hold CMS token");
5707       assert_lock_strong(bitMapLock());
5708       bitMapLock()->unlock();
5709       ConcurrentMarkSweepThread::desynchronize(true);
5710       stopTimer();
5711       if (PrintCMSStatistics != 0) {
5712         incrementYields();
5713       }
5714 


5730   // are being measured for gc overhead limits, clean the "near" flag
5731   // and count.
5732   size_policy()->reset_gc_overhead_limit_count();
5733   _collectorState = Idling;
5734 
5735   register_gc_end();
5736 }
5737 
// Reset CMS data structures for the next cycle, as reset_concurrent()
// above does, but on a stop-the-world path: the mark bit map is cleared
// in one shot, with no chunking and no yielding to the foreground GC.
void CMSCollector::reset_stw() {
  // already have the lock (asserted below; caller acquired bitMapLock())
  assert(_collectorState == Resetting, "just checking");
  assert_lock_strong(bitMapLock());
  // Scope logging/tracing to the CMS thread's GC id while resetting.
  GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
  // Clear all mark bits so the next cycle starts with no grey objects;
  // must complete before the state transition back to Idling below.
  _markBitMap.clear_all();
  _collectorState = Idling;
  register_gc_end();
}
5748 
5749 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5750   GCTraceCPUTime tcpu;

5751   TraceCollectorStats tcs(counters());
5752 
5753   switch (op) {
5754     case CMS_op_checkpointRootsInitial: {
5755       GCTraceTime(Info, gc) t("GC pause, initial mark");
5756       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5757       checkpointRootsInitial();
5758       if (PrintGC) {
5759         _cmsGen->printOccupancy("initial-mark");
5760       }
5761       break;
5762     }
5763     case CMS_op_checkpointRootsFinal: {
5764       GCTraceTime(Info, gc) t("GC pause, remark");
5765       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5766       checkpointRootsFinal();
5767       if (PrintGC) {
5768         _cmsGen->printOccupancy("remark");
5769       }
5770       break;
5771     }
5772     default:
5773       fatal("No such CMS_op");
5774   }
5775 }
5776 
5777 #ifndef PRODUCT
// Non-PRODUCT helper: the size of a FreeChunk header, in HeapWords.
// The name suggests callers use it to skip past the header at the start
// of a free chunk — presumably during debug scans; the value returned is
// simply FreeChunk::header_size().
size_t const CMSCollector::skip_header_HeapWords() {
  return FreeChunk::header_size();
}
5781 
5782 // Try and collect here conditions that should hold when
5783 // CMS thread is exiting. The idea is that the foreground GC
5784 // thread should not be blocked if it wants to terminate


< prev index next >