src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

 558   _permGen(permGen),
 559   _ct(ct),
 560   _ref_processor(NULL),    // will be set later
 561   _conc_workers(NULL),     // may be set later
 562   _abort_preclean(false),
 563   _start_sampling(false),
 564   _between_prologue_and_epilogue(false),
 565   _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
 566   _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
 567   _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
 568                  -1 /* lock-free */, "No_lock" /* dummy */),
 569   _modUnionClosure(&_modUnionTable),
 570   _modUnionClosurePar(&_modUnionTable),
 571   // Adjust my span to cover old (cms) gen and perm gen
 572   _span(cmsGen->reserved()._union(permGen->reserved())),
 573   // Construct the is_alive_closure with _span & markBitMap
 574   _is_alive_closure(_span, &_markBitMap),
 575   _restart_addr(NULL),
 576   _overflow_list(NULL),
 577   _stats(cmsGen),
 578   _eden_chunk_array(NULL),     // may be set in ctor body
 579   _eden_chunk_capacity(0),     // -- ditto --
 580   _eden_chunk_index(0),        // -- ditto --
 581   _survivor_plab_array(NULL),  // -- ditto --
 582   _survivor_chunk_array(NULL), // -- ditto --
 583   _survivor_chunk_capacity(0), // -- ditto --
 584   _survivor_chunk_index(0),    // -- ditto --
 585   _ser_pmc_preclean_ovflw(0),
 586   _ser_kac_preclean_ovflw(0),
 587   _ser_pmc_remark_ovflw(0),
 588   _par_pmc_remark_ovflw(0),
 589   _ser_kac_ovflw(0),
 590   _par_kac_ovflw(0),
 591 #ifndef PRODUCT
 592   _num_par_pushes(0),
 593 #endif
 594   _collection_count_start(0),
 595   _verifying(false),
 596   _icms_start_limit(NULL),
 597   _icms_stop_limit(NULL),


 737   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 738 
 739   // Support for parallelizing young gen rescan
 740   GenCollectedHeap* gch = GenCollectedHeap::heap();
 741   _young_gen = gch->prev_gen(_cmsGen);
 742   if (gch->supports_inline_contig_alloc()) {
 743     _top_addr = gch->top_addr();
 744     _end_addr = gch->end_addr();
 745     assert(_young_gen != NULL, "no _young_gen");
 746     _eden_chunk_index = 0;
 747     _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
 748     _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
 749     if (_eden_chunk_array == NULL) {
 750       _eden_chunk_capacity = 0;
 751       warning("GC/CMS: _eden_chunk_array allocation failure");
 752     }
 753   }
 754   assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
 755 
 756   // Support for parallelizing survivor space rescan
 757   if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
 758     const size_t max_plab_samples =
 759       ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
 760 
 761     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 762     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
 763     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 764     if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
 765         || _cursor == NULL) {
 766       warning("Failed to allocate survivor plab/chunk array");
 767       if (_survivor_plab_array  != NULL) {
 768         FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
 769         _survivor_plab_array = NULL;
 770       }
 771       if (_survivor_chunk_array != NULL) {
 772         FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
 773         _survivor_chunk_array = NULL;
 774       }
 775       if (_cursor != NULL) {
 776         FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
 777         _cursor = NULL;


2119         // restarted from scratch;  start the cycle.
2120         _collectorState = InitialMarking;
2121       }
2122       // If first_state was not Idling, then a background GC
2123       // was in progress and has now finished.  No need to do it
2124       // again.  Leave the state as Idling.
2125       break;
2126     case Precleaning:
2127       // In the foreground case don't do the precleaning since
2128       // it is not done concurrently and there is extra work
2129       // required.
2130       _collectorState = FinalMarking;
2131   }
2132   collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
2133 
2134   // For a mark-sweep, compute_new_size() will be called
2135   // in the heap's do_collection() method.
2136 }
2137 
2138
2139 void CMSCollector::getFreelistLocks() const {
2140   // Get locks for all free lists in all generations that this
2141   // collector is responsible for
2142   _cmsGen->freelistLock()->lock_without_safepoint_check();
2143   _permGen->freelistLock()->lock_without_safepoint_check();
2144 }
2145 
2146 void CMSCollector::releaseFreelistLocks() const {
2147   // Release locks for all free lists in all generations that this
2148   // collector is responsible for
2149   _cmsGen->freelistLock()->unlock();
2150   _permGen->freelistLock()->unlock();
2151 }
2152 
2153 bool CMSCollector::haveFreelistLocks() const {
2154   // Check locks for all free lists in all generations that this
2155   // collector is responsible for
2156   assert_lock_strong(_cmsGen->freelistLock());
2157   assert_lock_strong(_permGen->freelistLock());
2158   PRODUCT_ONLY(ShouldNotReachHere());


3513   _collector->stopTimer();
3514   _wallclock.stop();
3515   if (PrintGCDetails) {
3516     gclog_or_tty->date_stamp(PrintGCDateStamps);
3517     gclog_or_tty->stamp(PrintGCTimeStamps);
3518     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3519                  _collector->cmsGen()->short_name(),
3520                  _phase, _collector->timerValue(), _wallclock.seconds());
3521     if (_print_cr) {
3522       gclog_or_tty->print_cr("");
3523     }
3524     if (PrintCMSStatistics != 0) {
3525       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3526                     _collector->yields());
3527     }
3528   }
3529 }
3530 
3531 // CMS work
3532
3533 // Checkpoint the roots into this generation from outside
3534 // this generation. [Note this initial checkpoint need only
3535 // be approximate -- we'll do a catch up phase subsequently.]
3536 void CMSCollector::checkpointRootsInitial(bool asynch) {
3537   assert(_collectorState == InitialMarking, "Wrong collector state");
3538   check_correct_thread_executing();
3539   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
3540 
3541   save_heap_summary();
3542   report_heap_summary(GCWhen::BeforeGC);
3543 
3544   ReferenceProcessor* rp = ref_processor();
3545   SpecializationStats::clear();
3546   assert(_restart_addr == NULL, "Control point invariant");
3547   if (asynch) {
3548     // acquire locks for subsequent manipulations
3549     MutexLockerEx x(bitMapLock(),
3550                     Mutex::_no_safepoint_check_flag);
3551     checkpointRootsInitialWork(asynch);
3552     // enable ("weak") refs discovery


3602 
3603   FalseClosure falseClosure;
3604   // In the case of a synchronous collection, we will elide the
3605   // remark step, so it's important to catch all the nmethod oops
3606   // in this step.
3607   // The final 'true' flag to gen_process_strong_roots will ensure this.
3608   // If 'async' is true, we can relax the nmethod tracing.
3609   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3610   GenCollectedHeap* gch = GenCollectedHeap::heap();
3611 
3612   verify_work_stacks_empty();
3613   verify_overflow_empty();
3614 
3615   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
3616   // Update the saved marks which may affect the root scans.
3617   gch->save_marks();
3618 
3619   // weak reference processing has not started yet.
3620   ref_processor()->set_enqueuing_is_done(false);
3621
3622   {
3623     // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
3624     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)

3625     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3626     gch->gen_process_strong_roots(_cmsGen->level(),
3627                                   true,   // younger gens are roots
3628                                   true,   // activate StrongRootsScope
3629                                   true,   // collecting perm gen
3630                                   SharedHeap::ScanningOption(roots_scanning_options()),
3631                                   &notOlder,
3632                                   true,   // walk all of code cache if (so & SO_CodeCache)
3633                                   NULL);
3634   }
3635 
3636   // Clear mod-union table; it will be dirtied in the prologue of
3637   // CMS generation per each younger generation collection.
3638 
3639   assert(_modUnionTable.isAllClear(),
3640        "Was cleared in most recent final checkpoint phase"
3641        " or no bits are set in the gc_prologue before the start of the next "
3642        "subsequent marking phase.");
3643 
3644   // Save the end of the used_region of the constituent generations
3645   // to be used to limit the extent of sweep in each generation.
3646   save_sweep_limits();
3647   if (UseAdaptiveSizePolicy) {
3648     size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3649   }
3650   verify_overflow_empty();
3651 }
3652 
3653 bool CMSCollector::markFromRoots(bool asynch) {
3654   // we might be tempted to assert that:
3655   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),


4397       _restart_addr = NULL;
4398       return false;  // indicating failure to complete marking
4399     }
4400     // Deal with stack overflow:
4401     // we restart marking from _restart_addr
4402     HeapWord* ra = _restart_addr;
4403     markFromRootsClosure.reset(ra);
4404     _restart_addr = NULL;
4405     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4406   }
4407   return true;
4408 }
4409 
4410 void CMSCollector::preclean() {
4411   check_correct_thread_executing();
4412   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4413   verify_work_stacks_empty();
4414   verify_overflow_empty();
4415   _abort_preclean = false;
4416   if (CMSPrecleaningEnabled) {
4417     _eden_chunk_index = 0;
4418     size_t used = get_eden_used();
4419     size_t capacity = get_eden_capacity();
4420     // Don't start sampling unless we will get sufficiently
4421     // many samples.
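    // The test below compares eden occupancy against
    //   capacity * CMSScheduleRemarkEdenPenetration / (100 * CMSScheduleRemarkSamplingRatio)
    // i.e. 1/SamplingRatio of the occupancy at which remark will be scheduled.
    // Worked example (illustrative flag values): capacity = 100 MB,
    // EdenPenetration = 50 (%), SamplingRatio = 5 gives a 10 MB threshold, so
    // sampling only starts while eden is still well below the remark trigger
    // and a useful number of samples can still accumulate.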
4422     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4423                 * CMSScheduleRemarkEdenPenetration)) {
4424       _start_sampling = true;
4425     } else {
4426       _start_sampling = false;
4427     }
4428     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4429     CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4430     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4431   }
4432   CMSTokenSync x(true); // is cms thread
4433   if (CMSPrecleaningEnabled) {
4434     sample_eden();
4435     _collectorState = AbortablePreclean;
4436   } else {
4437     _collectorState = FinalMarking;


4506   CMSTokenSync x(true); // is cms thread
4507   if (_collectorState != Idling) {
4508     assert(_collectorState == AbortablePreclean,
4509            "Spontaneous state transition?");
4510     _collectorState = FinalMarking;
4511   } // Else, a foreground collection completed this CMS cycle.
4512   return;
4513 }
4514 
4515 // Respond to an Eden sampling opportunity
4516 void CMSCollector::sample_eden() {
4517   // Make sure a young gc cannot sneak in between our
4518   // reading and recording of a sample.
4519   assert(Thread::current()->is_ConcurrentGC_thread(),
4520          "Only the cms thread may collect Eden samples");
4521   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4522          "Should collect samples while holding CMS token");
4523   if (!_start_sampling) {
4524     return;
4525   }
4526   if (_eden_chunk_array) {
4527     if (_eden_chunk_index < _eden_chunk_capacity) {
4528       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
4529       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4530              "Unexpected state of Eden");
4531       // We'd like to check that what we just sampled is an oop-start address;
4532       // however, we cannot do that here since the object may not yet have been
4533       // initialized. So we'll instead do the check when we _use_ this sample
4534       // later.
4535       if (_eden_chunk_index == 0 ||
4536           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4537                          _eden_chunk_array[_eden_chunk_index-1])
4538            >= CMSSamplingGrain)) {
4539         _eden_chunk_index++;  // commit sample
4540       }
4541     }
4542   }
4543   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4544     size_t used = get_eden_used();
4545     size_t capacity = get_eden_capacity();
4546     assert(used <= capacity, "Unexpected state of Eden");


4983   assert_lock_strong(bitMapLock());
4984 
4985   DEBUG_ONLY(RememberKlassesChecker fmx(should_unload_classes());)
4986   if (!init_mark_was_synchronous) {
4987     // We might assume that we need not fill TLAB's when
4988     // CMSScavengeBeforeRemark is set, because we may have just done
4989     // a scavenge which would have filled all TLAB's -- and besides
4990     // Eden would be empty. This however may not always be the case --
4991     // for instance although we asked for a scavenge, it may not have
4992     // happened because of a JNI critical section. We probably need
4993     // a policy for deciding whether we can in that case wait until
4994     // the critical section releases and then do the remark following
4995     // the scavenge, and skip it here. In the absence of that policy,
4996     // or of an indication of whether the scavenge did indeed occur,
4997     // we cannot rely on TLAB's having been filled and must do
4998     // so here just in case a scavenge did not happen.
4999     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
5000     // Update the saved marks which may affect the root scans.
5001     gch->save_marks();
5002
5003     {
5004       COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
5005 
5006       // Note on the role of the mod union table:
5007       // Since the marker in "markFromRoots" marks concurrently with
5008       // mutators, it is possible for some reachable objects not to have been
5009       // scanned. For instance, an only reference to an object A was
5010       // placed in object B after the marker scanned B. Unless B is rescanned,
5011       // A would be collected. Such updates to references in marked objects
5012       // are detected via the mod union table which is the set of all cards
5013       // dirtied since the first checkpoint in this GC cycle and prior to
5014       // the most recent young generation GC, minus those cleaned up by the
5015       // concurrent precleaning.
5016       if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
5017         GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
5018         do_remark_parallel();
5019       } else {
5020         GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
5021                     _gc_timer_cm);
5022         do_remark_non_parallel();


5090   // Check that all the klasses have been checked
5091   assert(_revisitStack.isEmpty(), "Not all klasses revisited");
5092 
5093   if ((VerifyAfterGC || VerifyDuringGC) &&
5094       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5095     verify_after_remark();
5096   }
5097 
5098   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
5099 
5100   // Change under the freelistLocks.
5101   _collectorState = Sweeping;
5102   // Call isAllClear() under bitMapLock
5103   assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
5104     " final marking");
5105   if (UseAdaptiveSizePolicy) {
5106     size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5107   }
5108 }
5109
5110 // Parallel remark task
5111 class CMSParRemarkTask: public AbstractGangTask {
5112   CMSCollector* _collector;
5113   int           _n_workers;
5114   CompactibleFreeListSpace* _cms_space;
5115   CompactibleFreeListSpace* _perm_space;
5116 
5117   // The per-thread work queues, available here for stealing.
5118   OopTaskQueueSet*       _task_queues;
5119   ParallelTaskTerminator _term;
5120 
5121  public:
5122   // A value of 0 passed to n_workers will cause the number of
5123   // workers to be taken from the active workers in the work gang.
5124   CMSParRemarkTask(CMSCollector* collector,
5125                    CompactibleFreeListSpace* cms_space,
5126                    CompactibleFreeListSpace* perm_space,
5127                    int n_workers, FlexibleWorkGang* workers,
5128                    OopTaskQueueSet* task_queues):
5129     AbstractGangTask("Rescan roots and grey objects in parallel"),
5130     _collector(collector),
5131     _cms_space(cms_space), _perm_space(perm_space),
5132     _n_workers(n_workers),
5133     _task_queues(task_queues),
5134     _term(n_workers, task_queues) { }
5135 
5136   OopTaskQueueSet* task_queues() { return _task_queues; }
5137 
5138   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5139 
5140   ParallelTaskTerminator* terminator() { return &_term; }
5141   int n_workers() { return _n_workers; }
5142 
5143   void work(uint worker_id);
5144 
5145  private:
5146   // Work method in support of parallel rescan ... of young gen spaces
5147   void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
5148                              ContiguousSpace* space,
5149                              HeapWord** chunk_array, size_t chunk_top);
5150 
5151   // ... of  dirty cards in old space
5152   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5153                                   Par_MarkRefsIntoAndScanClosure* cl);
5154 
5155   // ... work stealing for the above
5156   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5157 };
5158
5159 // work_queue(i) is passed to the closure
5160 // Par_MarkRefsIntoAndScanClosure.  The "i" parameter
5161 // also is passed to do_dirty_card_rescan_tasks() and to
5162 // do_work_steal() to select the i-th task_queue.
5163 
5164 void CMSParRemarkTask::work(uint worker_id) {
5165   elapsedTimer _timer;
5166   ResourceMark rm;
5167   HandleMark   hm;
5168 
5169   // ---------- rescan from roots --------------
5170   _timer.start();
5171   GenCollectedHeap* gch = GenCollectedHeap::heap();
5172   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5173     _collector->_span, _collector->ref_processor(),
5174     &(_collector->_markBitMap),
5175     work_queue(worker_id), &(_collector->_revisitStack));
5176 
5177   // Rescan young gen roots first since these are likely
5178   // coarsely partitioned and may, on that account, constitute
5179   // the critical path; thus, it's best to start off that
5180   // work first.
5181   // ---------- young gen roots --------------
5182   {
5183     DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5184     EdenSpace* eden_space = dng->eden();
5185     ContiguousSpace* from_space = dng->from();
5186     ContiguousSpace* to_space   = dng->to();
5187 
5188     HeapWord** eca = _collector->_eden_chunk_array;
5189     size_t     ect = _collector->_eden_chunk_index;
5190     HeapWord** sca = _collector->_survivor_chunk_array;
5191     size_t     sct = _collector->_survivor_chunk_index;
5192 
5193     assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5194     assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5195 
5196     do_young_space_rescan(worker_id, &par_mrias_cl, to_space, NULL, 0);
5197     do_young_space_rescan(worker_id, &par_mrias_cl, from_space, sca, sct);
5198     do_young_space_rescan(worker_id, &par_mrias_cl, eden_space, eca, ect);
5199 
5200     _timer.stop();
5201     if (PrintCMSStatistics != 0) {
5202       gclog_or_tty->print_cr(
5203         "Finished young gen rescan work in %dth thread: %3.3f sec",
5204         worker_id, _timer.seconds());
5205     }
5206   }
5207 
5208   // ---------- remaining roots --------------
5209   _timer.reset();
5210   _timer.start();
5211   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5212                                 false,     // yg was scanned above
5213                                 false,     // this is parallel code
5214                                 true,      // collecting perm gen
5215                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5216                                 &par_mrias_cl,
5217                                 true,   // walk all of code cache if (so & SO_CodeCache)
5218                                 NULL);
5219   assert(_collector->should_unload_classes()


5240     gclog_or_tty->print_cr(
5241       "Finished dirty card rescan work in %dth thread: %3.3f sec",
5242       worker_id, _timer.seconds());
5243   }
5244 
5245   // ---------- steal work from other threads ...
5246   // ---------- ... and drain overflow list.
5247   _timer.reset();
5248   _timer.start();
5249   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
5250   _timer.stop();
5251   if (PrintCMSStatistics != 0) {
5252     gclog_or_tty->print_cr(
5253       "Finished work stealing in %dth thread: %3.3f sec",
5254       worker_id, _timer.seconds());
5255   }
5256 }
5257 
5258 // Note that parameter "i" is not used.
5259 void
5260 CMSParRemarkTask::do_young_space_rescan(int i,
5261   Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
5262   HeapWord** chunk_array, size_t chunk_top) {
5263   // Until all tasks completed:
5264   // . claim an unclaimed task
5265   // . compute region boundaries corresponding to task claimed
5266   //   using chunk_array
5267   // . par_oop_iterate(cl) over that region
5268 
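  // For example (illustrative; the exact boundary computation follows below),
  // two committed samples C1 < C2 in chunk_array give three claimable tasks,
  // roughly [bottom, C1), [C1, C2) and [C2, top), while chunk_top == 0 leaves
  // a single task covering the whole used part of the space.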
5269   ResourceMark rm;
5270   HandleMark   hm;
5271 
5272   SequentialSubTasksDone* pst = space->par_seq_tasks();
5273   assert(pst->valid(), "Uninitialized use?");
5274 
5275   uint nth_task = 0;
5276   uint n_tasks  = pst->n_tasks();
5277 
5278   HeapWord *start, *end;
5279   while (!pst->is_task_claimed(/* reference */ nth_task)) {
5280     // We claimed task # nth_task; compute its boundaries.
5281     if (chunk_top == 0) {  // no samples were taken


5437     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5438       NOT_PRODUCT(num_steals++;)
5439       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5440       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5441       // Do scanning work
5442       obj_to_scan->oop_iterate(cl);
5443       // Loop around, finish this work, and try to steal some more
5444     } else if (terminator()->offer_termination()) {
5445         break;  // nirvana from the infinite cycle
5446     }
5447   }
5448   NOT_PRODUCT(
5449     if (PrintCMSStatistics != 0) {
5450       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5451     }
5452   )
5453   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5454          "Else our work is not yet done");
5455 }
5456 
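The steal-or-terminate loop in do_work_steal() above follows a standard work-stealing pattern: drain your own queue, try to steal from a peer, and only stop once every worker has offered termination with nothing left to take. Below is a minimal self-contained sketch of that pattern; the std:: types and the crude all-idle termination test are stand-ins for OopTaskQueue and ParallelTaskTerminator, not HotSpot APIs.

#include <atomic>
#include <deque>
#include <mutex>
#include <thread>
#include <vector>

// Per-worker queue: the owner pops from one end, thieves steal from the other.
struct WorkQueue {
  std::deque<int> items;
  std::mutex      lock;
  bool pop(int& out) {                 // owner end
    std::lock_guard<std::mutex> g(lock);
    if (items.empty()) return false;
    out = items.back(); items.pop_back(); return true;
  }
  bool steal(int& out) {               // thief end
    std::lock_guard<std::mutex> g(lock);
    if (items.empty()) return false;
    out = items.front(); items.pop_front(); return true;
  }
};

// Drain your own queue, then try to steal; only stop once every worker is
// simultaneously idle (a crude stand-in for the real terminator protocol).
void worker_loop(int id, std::vector<WorkQueue>& queues, int n_workers,
                 std::atomic<int>& idle, std::atomic<bool>& done) {
  int item;
  while (!done.load()) {
    if (queues[id].pop(item)) { /* scan item, may push children */ continue; }
    bool stolen = false;
    for (int v = 0; v < n_workers && !stolen; v++) {
      if (v != id) stolen = queues[v].steal(item);
    }
    if (stolen) { /* scan the stolen item */ continue; }
    // Offer termination: the last worker to go idle ends the phase.
    if (idle.fetch_add(1) + 1 == n_workers) {
      done.store(true);
    } else {
      std::this_thread::yield();       // work may still appear; retry
    }
    idle.fetch_sub(1);
  }
}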
5457 // Return a thread-local PLAB recording array, as appropriate.
5458 void* CMSCollector::get_data_recorder(int thr_num) {
5459   if (_survivor_plab_array != NULL &&
5460       (CMSPLABRecordAlways ||
5461        (_collectorState > Marking && _collectorState < FinalMarking))) {
5462     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5463     ChunkArray* ca = &_survivor_plab_array[thr_num];
5464     ca->reset();   // clear it so that fresh data is recorded
5465     return (void*) ca;
5466   } else {
5467     return NULL;
5468   }
5469 }
5470 
5471 // Reset all the thread-local PLAB recording arrays
5472 void CMSCollector::reset_survivor_plab_arrays() {
5473   for (uint i = 0; i < ParallelGCThreads; i++) {
5474     _survivor_plab_array[i].reset();
5475   }
5476 }
5477 
5478 // Merge the per-thread plab arrays into the global survivor chunk
5479 // array which will provide the partitioning of the survivor space
5480 // for CMS rescan.
5481 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
5482                                               int no_of_gc_threads) {
5483   assert(_survivor_plab_array  != NULL, "Error");
5484   assert(_survivor_chunk_array != NULL, "Error");
5485   assert(_collectorState == FinalMarking, "Error");
5486   for (int j = 0; j < no_of_gc_threads; j++) {
5487     _cursor[j] = 0;
5488   }
5489   HeapWord* top = surv->top();
5490   size_t i;
5491   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
5492     HeapWord* min_val = top;          // Higher than any PLAB address
5493     uint      min_tid = 0;            // position of min_val this round
5494     for (int j = 0; j < no_of_gc_threads; j++) {
5495       ChunkArray* cur_sca = &_survivor_plab_array[j];
5496       if (_cursor[j] == cur_sca->end()) {
5497         continue;
5498       }
5499       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5500       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5501       assert(surv->used_region().contains(cur_val), "Out of bounds value");
5502       if (cur_val < min_val) {
5503         min_tid = j;
5504         min_val = cur_val;
5505       } else {


5528     for (int j = 0; j < no_of_gc_threads; j++) {
5529       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5530       total += _cursor[j];
5531     }
5532     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5533     // Check that the merged array is in sorted order
5534     if (total > 0) {
5535       for (size_t i = 0; i < total - 1; i++) {
5536         if (PrintCMSStatistics > 0) {
5537           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5538                               i, _survivor_chunk_array[i]);
5539         }
5540         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5541                "Not sorted");
5542       }
5543     }
5544   #endif // ASSERT
5545 }
5546 
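merge_survivor_plab_arrays() above is in essence a k-way merge: each per-thread ChunkArray is already sorted, and one cursor per thread repeatedly selects the smallest unconsumed element. A stand-alone sketch of that merge, with plain vectors of addresses standing in for the HotSpot types (illustrative only):

#include <cstddef>
#include <cstdint>
#include <vector>

// Merge k individually sorted arrays of addresses into one sorted array by
// repeatedly taking the smallest unconsumed head, one cursor per array.
std::vector<uintptr_t> merge_sorted(
    const std::vector<std::vector<uintptr_t> >& per_thread) {
  const size_t k = per_thread.size();
  std::vector<size_t>    cursor(k, 0);      // plays the role of _cursor[j]
  std::vector<uintptr_t> merged;            // plays the role of the global chunk array
  for (;;) {
    size_t    min_tid = k;                  // k means "no candidate found yet"
    uintptr_t min_val = 0;
    for (size_t j = 0; j < k; j++) {
      if (cursor[j] == per_thread[j].size()) continue;   // array j exhausted
      uintptr_t cur = per_thread[j][cursor[j]];
      if (min_tid == k || cur < min_val) { min_tid = j; min_val = cur; }
    }
    if (min_tid == k) break;                // every per-thread array consumed
    merged.push_back(min_val);              // commit the smallest head ...
    cursor[min_tid]++;                      // ... and advance that cursor
  }
  return merged;                            // sorted, matching the ASSERT checks
}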
5547 // Set up the space's par_seq_tasks structure for work claiming
5548 // for parallel rescan of young gen.
5549 // See ParRescanTask where this is currently used.
5550 void
5551 CMSCollector::
5552 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5553   assert(n_threads > 0, "Unexpected n_threads argument");
5554   DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5555 
5556   // Eden space
5557   {
5558     SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5559     assert(!pst->valid(), "Clobbering existing data?");
5560     // Each valid entry in [0, _eden_chunk_index) represents a task.
5561     size_t n_tasks = _eden_chunk_index + 1;
5562     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5563     // Sets the condition for completion of the subtask (how many threads
5564     // need to finish in order to be done).
5565     pst->set_n_threads(n_threads);
5566     pst->set_n_tasks((int)n_tasks);
5567   }
5568 
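  // Worked example (illustrative): three committed eden samples give
  // _eden_chunk_index == 3 and hence n_tasks == 4; the three sampled
  // boundaries split the used part of eden into four claimable chunks.
  // set_n_threads() records how many workers must finish before the subtask
  // set as a whole counts as done, while set_n_tasks() is the number of
  // claimable chunks itself.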


6678     _span(span),
6679     _bitMap(bitMap)
6680 {
6681     assert(_ref_processor == NULL, "deliberately left NULL");
6682     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6683 }
6684 
6685 void MarkRefsIntoClosure::do_oop(oop obj) {
6686   // if p points into _span, then mark corresponding bit in _markBitMap
6687   assert(obj->is_oop(), "expected an oop");
6688   HeapWord* addr = (HeapWord*)obj;
6689   if (_span.contains(addr)) {
6690     // this should be made more efficient
6691     _bitMap->mark(addr);
6692   }
6693 }
6694 
6695 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
6696 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6697 
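MarkRefsIntoClosure::do_oop() above is the simplest marking closure here: if the object's start address lies in the collector's span, set the corresponding bit. A minimal stand-alone sketch of that pattern, with one bit per word over a covered range (an illustration only, not the CMSBitMap implementation, which also carries a shifter and its own lock):

#include <cstddef>
#include <cstdint>
#include <vector>

// One mark bit per heap word, indexed from the start of the covered span.
struct SimpleMarkBitMap {
  uintptr_t             start;   // address of the first word covered
  uintptr_t             end;     // one past the last covered address
  std::vector<uint64_t> bits;

  SimpleMarkBitMap(uintptr_t s, uintptr_t e)
    : start(s), end(e),
      bits(((e - s) / sizeof(void*) + 63) / 64, 0) {}

  bool covers(uintptr_t addr) const { return start <= addr && addr < end; }

  void mark(uintptr_t addr) {
    size_t word_index = (addr - start) / sizeof(void*);
    bits[word_index / 64] |= uint64_t(1) << (word_index % 64);
  }
};

// do_oop() analogue: only addresses inside the span get their bit set;
// everything else is outside the generations being collected and is ignored.
inline void mark_if_in_span(SimpleMarkBitMap& bm, uintptr_t obj_addr) {
  if (bm.covers(obj_addr)) {
    bm.mark(obj_addr);
  }
}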
6698 // A variant of the above, used for CMS marking verification.
6699 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6700   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6701     _span(span),
6702     _verification_bm(verification_bm),
6703     _cms_bm(cms_bm)
6704 {
6705     assert(_ref_processor == NULL, "deliberately left NULL");
6706     assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6707 }
6708 
6709 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6710   // if p points into _span, then mark corresponding bit in _markBitMap
6711   assert(obj->is_oop(), "expected an oop");
6712   HeapWord* addr = (HeapWord*)obj;
6713   if (_span.contains(addr)) {
6714     _verification_bm->mark(addr);
6715     if (!_cms_bm->isMarked(addr)) {
6716       oop(addr)->print();
6717       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);


9343       if (UsePerfData) {
9344         _space_counters->update_capacity();
9345         _gen_counters->update_all();
9346       }
9347 
9348       if (Verbose && PrintGCDetails) {
9349         size_t new_mem_size = _virtual_space.committed_size();
9350         size_t old_mem_size = new_mem_size + bytes;
9351         gclog_or_tty->print_cr("Shrinking %s from %ldK by %ldK to %ldK",
9352                       name(), old_mem_size/K, bytes/K, new_mem_size/K);
9353       }
9354     }
9355 
9356     assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9357       "Inconsistency at end of space");
9358     assert(chunk_at_end->end() == _cmsSpace->end(),
9359       "Shrinking is inconsistent");
9360     return;
9361   }
9362 }
9363 
9364 // Transfer some number of overflown objects to usual marking
9365 // stack. Return true if some objects were transferred.
9366 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9367   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9368                     (size_t)ParGCDesiredObjsFromOverflowList);
9369 
9370   bool res = _collector->take_from_overflow_list(num, _mark_stack);
9371   assert(_collector->overflow_list_is_empty() || res,
9372          "If list is not empty, we should have taken something");
9373   assert(!res || !_mark_stack->isEmpty(),
9374          "If we took something, it should now be on our stack");
9375   return res;
9376 }
9377 
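// Worked example for the transfer size above (illustrative numbers, and
// ParGCDesiredObjsFromOverflowList assumed to be 20): a mark stack of
// capacity 4096 holding 1024 entries has (4096 - 1024) / 4 = 768 as a
// quarter of its free space, so the call moves MIN2(768, 20) = 20 objects:
// never more than a quarter of the remaining stack space, and never more
// than the flag allows per trip to the overflow list.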
9378 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9379   size_t res = _sp->block_size_no_stall(addr, _collector);
9380   if (_sp->block_is_obj(addr)) {
9381     if (_live_bit_map->isMarked(addr)) {
9382       // It can't have been dead in a previous cycle
9383       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");


9415                  false /* recordGCEndTime */,
9416                  false /* countCollection */  );
9417       break;
9418 
9419     case CMSCollector::Sweeping:
9420       initialize(true  /* fullGC */ ,
9421                  cause /* cause of the GC */,
9422                  false /* recordGCBeginTime */,
9423                  false /* recordPreGCUsage */,
9424                  true  /* recordPeakUsage */,
9425                  true  /* recordPostGCusage */,
9426                  false /* recordAccumulatedGCTime */,
9427                  true  /* recordGCEndTime */,
9428                  true  /* countCollection */  );
9429       break;
9430 
9431     default:
9432       ShouldNotReachHere();
9433   }
9434 }
9435 

(The listing below shows the same file with the change applied; line numbers refer to the patched revision, while the listing above is the original.)

 558   _permGen(permGen),
 559   _ct(ct),
 560   _ref_processor(NULL),    // will be set later
 561   _conc_workers(NULL),     // may be set later
 562   _abort_preclean(false),
 563   _start_sampling(false),
 564   _between_prologue_and_epilogue(false),
 565   _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
 566   _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
 567   _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
 568                  -1 /* lock-free */, "No_lock" /* dummy */),
 569   _modUnionClosure(&_modUnionTable),
 570   _modUnionClosurePar(&_modUnionTable),
 571   // Adjust my span to cover old (cms) gen and perm gen
 572   _span(cmsGen->reserved()._union(permGen->reserved())),
 573   // Construct the is_alive_closure with _span & markBitMap
 574   _is_alive_closure(_span, &_markBitMap),
 575   _restart_addr(NULL),
 576   _overflow_list(NULL),
 577   _stats(cmsGen),
 578   _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
 579   _eden_chunk_array(NULL),     // may be set in ctor body
 580   _eden_chunk_capacity(0),     // -- ditto --
 581   _eden_chunk_index(0),        // -- ditto --
 582   _survivor_plab_array(NULL),  // -- ditto --
 583   _survivor_chunk_array(NULL), // -- ditto --
 584   _survivor_chunk_capacity(0), // -- ditto --
 585   _survivor_chunk_index(0),    // -- ditto --
 586   _ser_pmc_preclean_ovflw(0),
 587   _ser_kac_preclean_ovflw(0),
 588   _ser_pmc_remark_ovflw(0),
 589   _par_pmc_remark_ovflw(0),
 590   _ser_kac_ovflw(0),
 591   _par_kac_ovflw(0),
 592 #ifndef PRODUCT
 593   _num_par_pushes(0),
 594 #endif
 595   _collection_count_start(0),
 596   _verifying(false),
 597   _icms_start_limit(NULL),
 598   _icms_stop_limit(NULL),


 738   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 739 
 740   // Support for parallelizing young gen rescan
 741   GenCollectedHeap* gch = GenCollectedHeap::heap();
 742   _young_gen = gch->prev_gen(_cmsGen);
 743   if (gch->supports_inline_contig_alloc()) {
 744     _top_addr = gch->top_addr();
 745     _end_addr = gch->end_addr();
 746     assert(_young_gen != NULL, "no _young_gen");
 747     _eden_chunk_index = 0;
 748     _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
 749     _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
 750     if (_eden_chunk_array == NULL) {
 751       _eden_chunk_capacity = 0;
 752       warning("GC/CMS: _eden_chunk_array allocation failure");
 753     }
 754   }
 755   assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
 756 
 757   // Support for parallelizing survivor space rescan
 758   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 759     const size_t max_plab_samples =
 760       ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
 761 
 762     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 763     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
 764     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 765     if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
 766         || _cursor == NULL) {
 767       warning("Failed to allocate survivor plab/chunk array");
 768       if (_survivor_plab_array  != NULL) {
 769         FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
 770         _survivor_plab_array = NULL;
 771       }
 772       if (_survivor_chunk_array != NULL) {
 773         FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
 774         _survivor_chunk_array = NULL;
 775       }
 776       if (_cursor != NULL) {
 777         FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
 778         _cursor = NULL;


2120         // restarted from scratch;  start the cycle.
2121         _collectorState = InitialMarking;
2122       }
2123       // If first_state was not Idling, then a background GC
2124       // was in progress and has now finished.  No need to do it
2125       // again.  Leave the state as Idling.
2126       break;
2127     case Precleaning:
2128       // In the foreground case don't do the precleaning since
2129       // it is not done concurrently and there is extra work
2130       // required.
2131       _collectorState = FinalMarking;
2132   }
2133   collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
2134 
2135   // For a mark-sweep, compute_new_size() will be called
2136   // in the heap's do_collection() method.
2137 }
2138 
2139 
2140 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
2141   DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
2142   EdenSpace* eden_space = dng->eden();
2143   ContiguousSpace* from_space = dng->from();
2144   ContiguousSpace* to_space   = dng->to();
2145   // Eden
2146   if (_eden_chunk_array != NULL) {
2147     gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2148                            eden_space->bottom(), eden_space->top(),
2149                            eden_space->end(), eden_space->capacity());
2150     gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
2151                            "_eden_chunk_capacity=" SIZE_FORMAT,
2152                            _eden_chunk_index, _eden_chunk_capacity);
2153     for (size_t i = 0; i < _eden_chunk_index; i++) {
2154       gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2155                              i, _eden_chunk_array[i]);
2156     }
2157   }
2158   // Survivor
2159   if (_survivor_chunk_array != NULL) {
2160     gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2161                            from_space->bottom(), from_space->top(),
2162                            from_space->end(), from_space->capacity());
2163     gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
2164                            "_survivor_chunk_capacity=" SIZE_FORMAT,
2165                            _survivor_chunk_index, _survivor_chunk_capacity);
2166     for (size_t i = 0; i < _survivor_chunk_index; i++) {
2167       gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2168                              i, _survivor_chunk_array[i]);
2169     }
2170   }
2171 }
2172 
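For reference, the shape of the output produced by print_eden_and_survivor_chunk_arrays() when CMSPrintEdenSurvivorChunks is on, with made-up addresses and sizes:

  eden 0x00000000c0000000-0x00000000c1a00000-0x00000000c4000000(67108864)
  _eden_chunk_index=2, _eden_chunk_capacity=4097
  _eden_chunk_array[0]=0x00000000c0800000
  _eden_chunk_array[1]=0x00000000c1200000
  survivor 0x00000000c4000000-0x00000000c4300000-0x00000000c4800000(8388608)
  _survivor_chunk_index=1, _survivor_chunk_capacity=512
  _survivor_chunk_array[0]=0x00000000c4100000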
2173 void CMSCollector::getFreelistLocks() const {
2174   // Get locks for all free lists in all generations that this
2175   // collector is responsible for
2176   _cmsGen->freelistLock()->lock_without_safepoint_check();
2177   _permGen->freelistLock()->lock_without_safepoint_check();
2178 }
2179 
2180 void CMSCollector::releaseFreelistLocks() const {
2181   // Release locks for all free lists in all generations that this
2182   // collector is responsible for
2183   _cmsGen->freelistLock()->unlock();
2184   _permGen->freelistLock()->unlock();
2185 }
2186 
2187 bool CMSCollector::haveFreelistLocks() const {
2188   // Check locks for all free lists in all generations that this
2189   // collector is responsible for
2190   assert_lock_strong(_cmsGen->freelistLock());
2191   assert_lock_strong(_permGen->freelistLock());
2192   PRODUCT_ONLY(ShouldNotReachHere());


3547   _collector->stopTimer();
3548   _wallclock.stop();
3549   if (PrintGCDetails) {
3550     gclog_or_tty->date_stamp(PrintGCDateStamps);
3551     gclog_or_tty->stamp(PrintGCTimeStamps);
3552     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3553                  _collector->cmsGen()->short_name(),
3554                  _phase, _collector->timerValue(), _wallclock.seconds());
3555     if (_print_cr) {
3556       gclog_or_tty->print_cr("");
3557     }
3558     if (PrintCMSStatistics != 0) {
3559       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3560                     _collector->yields());
3561     }
3562   }
3563 }
3564 
3565 // CMS work
3566 
3567 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
3568 class CMSParMarkTask : public AbstractGangTask {
3569  protected:
3570   CMSCollector*     _collector;
3571   int               _n_workers;
3572   CMSParMarkTask(const char* name, CMSCollector* collector, int n_workers) :
3573       AbstractGangTask(name),
3574       _collector(collector),
3575       _n_workers(n_workers) {}
3576   // Work method in support of parallel rescan ... of young gen spaces
3577   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
3578                              ContiguousSpace* space,
3579                              HeapWord** chunk_array, size_t chunk_top);
3580   void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
3581 };
3582 
3583 // Parallel initial mark task
3584 class CMSParInitialMarkTask: public CMSParMarkTask {
3585  public:
3586   CMSParInitialMarkTask(CMSCollector* collector, int n_workers) :
3587       CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
3588                      collector, n_workers) {}
3589   void work(uint worker_id);
3590 };
3591 
3592 // Checkpoint the roots into this generation from outside
3593 // this generation. [Note this initial checkpoint need only
3594 // be approximate -- we'll do a catch up phase subsequently.]
3595 void CMSCollector::checkpointRootsInitial(bool asynch) {
3596   assert(_collectorState == InitialMarking, "Wrong collector state");
3597   check_correct_thread_executing();
3598   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
3599 
3600   save_heap_summary();
3601   report_heap_summary(GCWhen::BeforeGC);
3602 
3603   ReferenceProcessor* rp = ref_processor();
3604   SpecializationStats::clear();
3605   assert(_restart_addr == NULL, "Control point invariant");
3606   if (asynch) {
3607     // acquire locks for subsequent manipulations
3608     MutexLockerEx x(bitMapLock(),
3609                     Mutex::_no_safepoint_check_flag);
3610     checkpointRootsInitialWork(asynch);
3611     // enable ("weak") refs discovery


3661 
3662   FalseClosure falseClosure;
3663   // In the case of a synchronous collection, we will elide the
3664   // remark step, so it's important to catch all the nmethod oops
3665   // in this step.
3666   // The final 'true' flag to gen_process_strong_roots will ensure this.
3667   // If 'async' is true, we can relax the nmethod tracing.
3668   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3669   GenCollectedHeap* gch = GenCollectedHeap::heap();
3670 
3671   verify_work_stacks_empty();
3672   verify_overflow_empty();
3673 
3674   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
3675   // Update the saved marks which may affect the root scans.
3676   gch->save_marks();
3677 
3678   // weak reference processing has not started yet.
3679   ref_processor()->set_enqueuing_is_done(false);
3680 
3681   if (CMSPrintEdenSurvivorChunks) {
3682     print_eden_and_survivor_chunk_arrays();
3683   }
3684 
3685   {
3686     // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
3687     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3688     if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3689       // The parallel version.
3690       FlexibleWorkGang* workers = gch->workers();
3691       assert(workers != NULL, "Need parallel worker threads.");
3692       int n_workers = workers->active_workers();
3693       CMSParInitialMarkTask tsk(this, n_workers);
3694       gch->set_par_threads(n_workers);
3695       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3696       if (n_workers > 1) {
3697         GenCollectedHeap::StrongRootsScope srs(gch);
3698         workers->run_task(&tsk);
3699       } else {
3700         GenCollectedHeap::StrongRootsScope srs(gch);
3701         tsk.work(0);
3702       }
3703       gch->set_par_threads(0);
3704     } else {
3705       // The serial version.
3706       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3707       gch->gen_process_strong_roots(_cmsGen->level(),
3708                                     true,   // younger gens are roots
3709                                     true,   // activate StrongRootsScope
3710                                     true,   // collecting perm gen
3711                                     SharedHeap::ScanningOption(roots_scanning_options()),
3712                                     &notOlder,
3713                                     true,   // walk all of code cache if (so & SO_CodeCache)
3714                                     NULL);
3715     }
3716   }
3717   // Clear mod-union table; it will be dirtied in the prologue of
3718   // CMS generation per each younger generation collection.
3719 
3720   assert(_modUnionTable.isAllClear(),
3721        "Was cleared in most recent final checkpoint phase"
3722        " or no bits are set in the gc_prologue before the start of the next "
3723        "subsequent marking phase.");
3724 
3725   // Save the end of the used_region of the constituent generations
3726   // to be used to limit the extent of sweep in each generation.
3727   save_sweep_limits();
3728   if (UseAdaptiveSizePolicy) {
3729     size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3730   }
3731   verify_overflow_empty();
3732 }
3733 
3734 bool CMSCollector::markFromRoots(bool asynch) {
3735   // we might be tempted to assert that:
3736   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),


4478       _restart_addr = NULL;
4479       return false;  // indicating failure to complete marking
4480     }
4481     // Deal with stack overflow:
4482     // we restart marking from _restart_addr
4483     HeapWord* ra = _restart_addr;
4484     markFromRootsClosure.reset(ra);
4485     _restart_addr = NULL;
4486     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4487   }
4488   return true;
4489 }
4490 
4491 void CMSCollector::preclean() {
4492   check_correct_thread_executing();
4493   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4494   verify_work_stacks_empty();
4495   verify_overflow_empty();
4496   _abort_preclean = false;
4497   if (CMSPrecleaningEnabled) {
4498     if (!CMSEdenChunksRecordAlways) {
4499       _eden_chunk_index = 0;
4500     }
4501     size_t used = get_eden_used();
4502     size_t capacity = get_eden_capacity();
4503     // Don't start sampling unless we will get sufficiently
4504     // many samples.
4505     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4506                 * CMSScheduleRemarkEdenPenetration)) {
4507       _start_sampling = true;
4508     } else {
4509       _start_sampling = false;
4510     }
4511     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4512     CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4513     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4514   }
4515   CMSTokenSync x(true); // is cms thread
4516   if (CMSPrecleaningEnabled) {
4517     sample_eden();
4518     _collectorState = AbortablePreclean;
4519   } else {
4520     _collectorState = FinalMarking;


4589   CMSTokenSync x(true); // is cms thread
4590   if (_collectorState != Idling) {
4591     assert(_collectorState == AbortablePreclean,
4592            "Spontaneous state transition?");
4593     _collectorState = FinalMarking;
4594   } // Else, a foreground collection completed this CMS cycle.
4595   return;
4596 }
4597 
4598 // Respond to an Eden sampling opportunity
4599 void CMSCollector::sample_eden() {
4600   // Make sure a young gc cannot sneak in between our
4601   // reading and recording of a sample.
4602   assert(Thread::current()->is_ConcurrentGC_thread(),
4603          "Only the cms thread may collect Eden samples");
4604   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4605          "Should collect samples while holding CMS token");
4606   if (!_start_sampling) {
4607     return;
4608   }
4609   // When CMSEdenChunksRecordAlways is true, the eden chunk array
4610   // is populated by the young generation.
4611   if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
4612     if (_eden_chunk_index < _eden_chunk_capacity) {
4613       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
4614       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4615              "Unexpected state of Eden");
4616       // We'd like to check that what we just sampled is an oop-start address;
4617       // however, we cannot do that here since the object may not yet have been
4618       // initialized. So we'll instead do the check when we _use_ this sample
4619       // later.
4620       if (_eden_chunk_index == 0 ||
4621           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4622                          _eden_chunk_array[_eden_chunk_index-1])
4623            >= CMSSamplingGrain)) {
4624         _eden_chunk_index++;  // commit sample
4625       }
4626     }
4627   }
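  // Worked example for the "commit sample" test above (illustrative numbers):
  // with the previously committed sample at eden offset 0 and top now at
  // offset 8192 words, while CMSSamplingGrain is 16384, the new top is still
  // written into slot _eden_chunk_index but the index is not bumped, so the
  // slot is simply overwritten by the next sample; only once top is at least
  // one grain past the last committed sample does _eden_chunk_index advance
  // and the sample stick.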
4628   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4629     size_t used = get_eden_used();
4630     size_t capacity = get_eden_capacity();
4631     assert(used <= capacity, "Unexpected state of Eden");


5068   assert_lock_strong(bitMapLock());
5069 
5070   DEBUG_ONLY(RememberKlassesChecker fmx(should_unload_classes());)
5071   if (!init_mark_was_synchronous) {
5072     // We might assume that we need not fill TLAB's when
5073     // CMSScavengeBeforeRemark is set, because we may have just done
5074     // a scavenge which would have filled all TLAB's -- and besides
5075     // Eden would be empty. This however may not always be the case --
5076     // for instance although we asked for a scavenge, it may not have
5077     // happened because of a JNI critical section. We probably need
5078     // a policy for deciding whether we can in that case wait until
5079     // the critical section releases and then do the remark following
5080     // the scavenge, and skip it here. In the absence of that policy,
5081     // or of an indication of whether the scavenge did indeed occur,
5082     // we cannot rely on TLAB's having been filled and must do
5083     // so here just in case a scavenge did not happen.
5084     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
5085     // Update the saved marks which may affect the root scans.
5086     gch->save_marks();
5087 
5088     if (CMSPrintEdenSurvivorChunks) {
5089       print_eden_and_survivor_chunk_arrays();
5090     }
5091 
5092     {
5093       COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
5094 
5095       // Note on the role of the mod union table:
5096       // Since the marker in "markFromRoots" marks concurrently with
5097       // mutators, it is possible for some reachable objects not to have been
5098       // scanned. For instance, an only reference to an object A was
5099       // placed in object B after the marker scanned B. Unless B is rescanned,
5100       // A would be collected. Such updates to references in marked objects
5101       // are detected via the mod union table which is the set of all cards
5102       // dirtied since the first checkpoint in this GC cycle and prior to
5103       // the most recent young generation GC, minus those cleaned up by the
5104       // concurrent precleaning.
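      // Concretely: during concurrent marking a mutator stores the only
      // reference to object A into an already-scanned object B. The card
      // covering that field of B is dirtied and, unless concurrent precleaning
      // already dealt with it, is part of the set consulted by the rescan
      // below, so B is revisited and A is marked rather than lost.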
5105       if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
5106         GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
5107         do_remark_parallel();
5108       } else {
5109         GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
5110                     _gc_timer_cm);
5111         do_remark_non_parallel();


5179   // Check that all the klasses have been checked
5180   assert(_revisitStack.isEmpty(), "Not all klasses revisited");
5181 
5182   if ((VerifyAfterGC || VerifyDuringGC) &&
5183       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5184     verify_after_remark();
5185   }
5186 
5187   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
5188 
5189   // Change under the freelistLocks.
5190   _collectorState = Sweeping;
5191   // Call isAllClear() under bitMapLock
5192   assert(_modUnionTable.isAllClear(), "Should be clear by end of the"
5193     " final marking");
5194   if (UseAdaptiveSizePolicy) {
5195     size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5196   }
5197 }
5198 
5199 void CMSParInitialMarkTask::work(uint worker_id) {
5200   elapsedTimer _timer;
5201   ResourceMark rm;
5202   HandleMark   hm;
5203 
5204   // ---------- scan from roots --------------
5205   _timer.start();
5206   GenCollectedHeap* gch = GenCollectedHeap::heap();
5207   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5208 
5209   // ---------- young gen roots --------------
5210   {
5211     work_on_young_gen_roots(worker_id, &par_mri_cl);
5212     _timer.stop();
5213     if (PrintCMSStatistics != 0) {
5214       gclog_or_tty->print_cr(
5215         "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5216         worker_id, _timer.seconds());
5217     }
5218   }
5219 
5220   // ---------- remaining roots --------------
5221   _timer.reset();
5222   _timer.start();
5223   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5224                                 false,     // yg was scanned above
5225                                 false,     // this is parallel code
5226                                 true,      // collecting perm gen
5227                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5228                                 &par_mri_cl,
5229                                 true,   // walk all of code cache if (so & SO_CodeCache)
5230                                 NULL);
5231   assert(_collector->should_unload_classes()
5232          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5233          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5234   _timer.stop();
5235   if (PrintCMSStatistics != 0) {
5236     gclog_or_tty->print_cr(
5237       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
5238       worker_id, _timer.seconds());
5239   }
5240 }
5241 
5242 // Parallel remark task
5243 class CMSParRemarkTask: public CMSParMarkTask {
5244   CompactibleFreeListSpace* _cms_space;
5245   CompactibleFreeListSpace* _perm_space;
5246 
5247   // The per-thread work queues, available here for stealing.
5248   OopTaskQueueSet*       _task_queues;
5249   ParallelTaskTerminator _term;
5250 
5251  public:
5252   // A value of 0 passed to n_workers will cause the number of
5253   // workers to be taken from the active workers in the work gang.
5254   CMSParRemarkTask(CMSCollector* collector,
5255                    CompactibleFreeListSpace* cms_space,
5256                    CompactibleFreeListSpace* perm_space,
5257                    int n_workers, FlexibleWorkGang* workers,
5258                    OopTaskQueueSet* task_queues):
5259     CMSParMarkTask("Rescan roots and grey objects in parallel",
5260                    collector, n_workers), 
5261     _cms_space(cms_space), _perm_space(perm_space),
5262     _task_queues(task_queues),
5263     _term(n_workers, task_queues) { }
5264 
5265   OopTaskQueueSet* task_queues() { return _task_queues; }
5266 
5267   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5268 
5269   ParallelTaskTerminator* terminator() { return &_term; }
5270   int n_workers() { return _n_workers; }
5271 
5272   void work(uint worker_id);
5273 
5274  private:
5275   // ... of  dirty cards in old space
5276   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5277                                   Par_MarkRefsIntoAndScanClosure* cl);
5278 
5279   // ... work stealing for the above
5280   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5281 };
5282 
5283 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
5284   DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5285   EdenSpace* eden_space = dng->eden();
5286   ContiguousSpace* from_space = dng->from();
5287   ContiguousSpace* to_space   = dng->to();
5288 
5289   HeapWord** eca = _collector->_eden_chunk_array;
5290   size_t     ect = _collector->_eden_chunk_index;
5291   HeapWord** sca = _collector->_survivor_chunk_array;
5292   size_t     sct = _collector->_survivor_chunk_index;
5293 
5294   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5295   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5296 
5297   do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
5298   do_young_space_rescan(worker_id, cl, from_space, sca, sct);
5299   do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
5300 }
5301 
5302 // work_queue(worker_id) is passed to the closure
5303 // Par_MarkRefsIntoAndScanClosure.  The worker_id parameter is
5304 // also passed to do_dirty_card_rescan_tasks() and to
5305 // do_work_steal() to select the corresponding task_queue.
5306 
5307 void CMSParRemarkTask::work(uint worker_id) {
5308   elapsedTimer _timer;
5309   ResourceMark rm;
5310   HandleMark   hm;
5311 
5312   // ---------- rescan from roots --------------
5313   _timer.start();
5314   GenCollectedHeap* gch = GenCollectedHeap::heap();
5315   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5316     _collector->_span, _collector->ref_processor(),
5317     &(_collector->_markBitMap),
5318     work_queue(worker_id), &(_collector->_revisitStack));
5319 
5320   // Rescan young gen roots first since these are likely
5321   // coarsely partitioned and may, on that account, constitute
5322   // the critical path; thus, it's best to get that work
5323   // started as early as possible.
5324   // ---------- young gen roots --------------
5325   {
5326     work_on_young_gen_roots(worker_id, &par_mrias_cl);
















5327     _timer.stop();
5328     if (PrintCMSStatistics != 0) {
5329       gclog_or_tty->print_cr(
5330         "Finished young gen rescan work in %dth thread: %3.3f sec",
5331         worker_id, _timer.seconds());
5332     }
5333   }
5334 
5335   // ---------- remaining roots --------------
5336   _timer.reset();
5337   _timer.start();
5338   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5339                                 false,     // yg was scanned above
5340                                 false,     // this is parallel code
5341                                 true,      // collecting perm gen
5342                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5343                                 &par_mrias_cl,
5344                                 true,   // walk all of code cache if (so & SO_CodeCache)
5345                                 NULL);
5346   assert(_collector->should_unload_classes()


5367     gclog_or_tty->print_cr(
5368       "Finished dirty card rescan work in %dth thread: %3.3f sec",
5369       worker_id, _timer.seconds());
5370   }
5371 
5372   // ---------- steal work from other threads ...
5373   // ---------- ... and drain overflow list.
5374   _timer.reset();
5375   _timer.start();
5376   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
5377   _timer.stop();
5378   if (PrintCMSStatistics != 0) {
5379     gclog_or_tty->print_cr(
5380       "Finished work stealing in %dth thread: %3.3f sec",
5381       worker_id, _timer.seconds());
5382   }
5383 }
5384 
5385 // Note that the worker_id parameter is not used.
5386 void
5387 CMSParMarkTask::do_young_space_rescan(uint worker_id,
5388   OopsInGenClosure* cl, ContiguousSpace* space,
5389   HeapWord** chunk_array, size_t chunk_top) {
5390   // Until all tasks completed:
5391   // . claim an unclaimed task
5392   // . compute region boundaries corresponding to task claimed
5393   //   using chunk_array
5394   // . par_oop_iterate(cl) over that region
5395 
5396   ResourceMark rm;
5397   HandleMark   hm;
5398 
5399   SequentialSubTasksDone* pst = space->par_seq_tasks();
5400   assert(pst->valid(), "Uninitialized use?");
5401 
5402   uint nth_task = 0;
5403   uint n_tasks  = pst->n_tasks();
5404 
5405   HeapWord *start, *end;
5406   while (!pst->is_task_claimed(/* reference */ nth_task)) {
5407     // We claimed task # nth_task; compute its boundaries.
5408     if (chunk_top == 0) {  // no samples were taken


5564     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5565       NOT_PRODUCT(num_steals++;)
5566       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5567       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5568       // Do scanning work
5569       obj_to_scan->oop_iterate(cl);
5570       // Loop around, finish this work, and try to steal some more
5571     } else if (terminator()->offer_termination()) {
5572         break;  // nirvana from the infinite cycle
5573     }
5574   }
5575   NOT_PRODUCT(
5576     if (PrintCMSStatistics != 0) {
5577       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5578     }
5579   )
5580   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5581          "Else our work is not yet done");
5582 }
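// A self-contained sketch (standard C++, not HotSpot code) of the
// drain / steal / offer-termination loop that do_work_steal() follows: finish
// local work, then try to steal from a sibling queue, and only when nothing
// can be stolen ask the terminator whether everyone else is also idle. All
// names below (WorkItem, ToyQueues, run_worker) are illustrative assumptions,
// not APIs from this file; the terminator is taken as a template parameter
// (e.g. the ToyTerminator sketched earlier).
#include <deque>
#include <functional>
#include <mutex>
#include <vector>

struct WorkItem { std::function<void()> scan; };

struct ToyQueues {
  std::vector<std::deque<WorkItem>> q;
  std::vector<std::mutex>           m;
  explicit ToyQueues(int n) : q(n), m(n) {}

  bool pop_local(int i, WorkItem& out) {
    std::lock_guard<std::mutex> g(m[i]);
    if (q[i].empty()) return false;
    out = q[i].back(); q[i].pop_back();
    return true;
  }
  bool steal(int thief, WorkItem& out) {
    // Try every other queue once; the real code picks victims pseudo-randomly.
    for (size_t v = 0; v < q.size(); v++) {
      if ((int)v == thief) continue;
      std::lock_guard<std::mutex> g(m[v]);
      if (!q[v].empty()) { out = q[v].front(); q[v].pop_front(); return true; }
    }
    return false;
  }
};

// One worker's loop, mirroring the structure of do_work_steal() above.
template <typename Terminator>
void run_worker(int worker_id, ToyQueues& queues, Terminator& term) {
  WorkItem item;
  while (true) {
    while (queues.pop_local(worker_id, item)) {
      item.scan();                    // "do scanning work"
    }
    if (queues.steal(worker_id, item)) {
      item.scan();                    // loop around, drain, and steal again
    } else if (term.offer_termination()) {
      break;                          // every worker is out of work
    }
  }
}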
5583 
5584 // Record object boundaries in _eden_chunk_array by sampling the eden
5585 // top in the slow-path eden object allocation code path when
5586 // CMSEdenChunksRecordAlways is true. If CMSEdenChunksRecordAlways is
5587 // false, we rely instead on the asynchronous sampling done in
5588 // sample_eden(), which is active during part of the preclean
5589 // phase.
5590 void CMSCollector::sample_eden_chunk() {
5591   if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
5592     if (_eden_chunk_lock->try_lock()) {
5593       // Record a sample. This is the critical section. The contents
5594       // of the _eden_chunk_array have to be non-decreasing in
5595       // address order.
5596       _eden_chunk_array[_eden_chunk_index] = *_top_addr;
5597       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
5598              "Unexpected state of Eden");
5599       if (_eden_chunk_index == 0 ||
5600           ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
5601            (pointer_delta(_eden_chunk_array[_eden_chunk_index],
5602                           _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
5603         _eden_chunk_index++;  // commit sample
5604       }
5605       _eden_chunk_lock->unlock();
5606     }
5607   }
5608 }
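// A minimal sketch (not HotSpot code) of the sampling rule used in
// sample_eden_chunk() above: a new sample of the allocation top is committed
// only if it lies strictly above the previous sample and at least `grain`
// units beyond it, so the recorded boundaries stay sorted and not too dense.
// The names ToySampler and record_sample(), and the use of size_t in place of
// HeapWord* addresses, are illustrative assumptions; the try_lock that guards
// the real critical section is omitted here.
#include <cstddef>
#include <vector>

class ToySampler {
  std::vector<size_t> _samples;   // plays the role of _eden_chunk_array
  size_t              _grain;     // plays the role of CMSSamplingGrain
 public:
  explicit ToySampler(size_t grain) : _grain(grain) {}

  void record_sample(size_t top) {
    if (_samples.empty() ||
        (top > _samples.back() && top - _samples.back() >= _grain)) {
      _samples.push_back(top);    // commit the sample
    }
  }
  const std::vector<size_t>& samples() const { return _samples; }
};

// Usage: with ToySampler s(64), record_sample(1000) is committed,
// record_sample(1010) is dropped (advance < 64), record_sample(1100) is
// committed, leaving a sorted, well-spaced boundary array.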
5609 
5610 // Return a thread-local PLAB recording array, as appropriate.
5611 void* CMSCollector::get_data_recorder(int thr_num) {
5612   if (_survivor_plab_array != NULL &&
5613       (CMSPLABRecordAlways ||
5614        (_collectorState > Marking && _collectorState < FinalMarking))) {
5615     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5616     ChunkArray* ca = &_survivor_plab_array[thr_num];
5617     ca->reset();   // clear it so that fresh data is recorded
5618     return (void*) ca;
5619   } else {
5620     return NULL;
5621   }
5622 }
5623 
5624 // Reset all the thread-local PLAB recording arrays
5625 void CMSCollector::reset_survivor_plab_arrays() {
5626   for (uint i = 0; i < ParallelGCThreads; i++) {
5627     _survivor_plab_array[i].reset();
5628   }
5629 }
5630 
5631 // Merge the per-thread plab arrays into the global survivor chunk
5632 // array which will provide the partitioning of the survivor space
5633 // for CMS initial scan and rescan.
5634 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
5635                                               int no_of_gc_threads) {
5636   assert(_survivor_plab_array  != NULL, "Error");
5637   assert(_survivor_chunk_array != NULL, "Error");
5638   assert(_collectorState == FinalMarking ||
5639          (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
5640   for (int j = 0; j < no_of_gc_threads; j++) {
5641     _cursor[j] = 0;
5642   }
5643   HeapWord* top = surv->top();
5644   size_t i;
5645   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
5646     HeapWord* min_val = top;          // Higher than any PLAB address
5647     uint      min_tid = 0;            // position of min_val this round
5648     for (int j = 0; j < no_of_gc_threads; j++) {
5649       ChunkArray* cur_sca = &_survivor_plab_array[j];
5650       if (_cursor[j] == cur_sca->end()) {
5651         continue;
5652       }
5653       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5654       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5655       assert(surv->used_region().contains(cur_val), "Out of bounds value");
5656       if (cur_val < min_val) {
5657         min_tid = j;
5658         min_val = cur_val;
5659       } else {


5682     for (int j = 0; j < no_of_gc_threads; j++) {
5683       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5684       total += _cursor[j];
5685     }
5686     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5687     // Check that the merged array is in sorted order
5688     if (total > 0) {
5689       for (size_t i = 0; i < total - 1; i++) {
5690         if (PrintCMSStatistics > 0) {
5691           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5692                               i, _survivor_chunk_array[i]);
5693         }
5694         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5695                "Not sorted");
5696       }
5697     }
5698   #endif // ASSERT
5699 }
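// A self-contained sketch (standard C++, not HotSpot code) of the
// cursor-based k-way merge performed by merge_survivor_plab_arrays(): each
// per-thread array is already sorted, one cursor per thread tracks how far
// that array has been consumed, and each round the smallest un-consumed value
// across all threads is appended to the merged array. The function name
// k_way_merge and the use of std::vector are illustrative assumptions.
#include <cstddef>
#include <vector>

std::vector<size_t> k_way_merge(const std::vector<std::vector<size_t>>& per_thread) {
  const size_t k = per_thread.size();
  std::vector<size_t> cursor(k, 0);        // plays the role of _cursor[]
  std::vector<size_t> merged;              // plays the role of _survivor_chunk_array

  while (true) {
    bool   found   = false;
    size_t min_tid = 0;                    // position of the minimum this round
    for (size_t j = 0; j < k; j++) {
      if (cursor[j] == per_thread[j].size()) continue;   // array j exhausted
      size_t cur_val = per_thread[j][cursor[j]];
      if (!found || cur_val < per_thread[min_tid][cursor[min_tid]]) {
        found   = true;
        min_tid = j;
      }
    }
    if (!found) break;                     // every cursor reached its end
    merged.push_back(per_thread[min_tid][cursor[min_tid]]);
    cursor[min_tid]++;                     // advance the winning cursor
  }
  return merged;                           // sorted, as the ASSERT block checks
}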
5700 
5701 // Set up the space's par_seq_tasks structure for work claiming
5702 // for parallel initial scan and rescan of young gen.
5703 // See ParRescanTask where this is currently used.
5704 void
5705 CMSCollector::
5706 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5707   assert(n_threads > 0, "Unexpected n_threads argument");
5708   DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5709 
5710   // Eden space
5711   {
5712     SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5713     assert(!pst->valid(), "Clobbering existing data?");
5714     // Each valid entry in [0, _eden_chunk_index) represents a task.
5715     size_t n_tasks = _eden_chunk_index + 1;
5716     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5717     // Sets the condition for completion of the subtask (how many threads
5718     // need to finish in order to be done).
5719     pst->set_n_threads(n_threads);
5720     pst->set_n_tasks((int)n_tasks);
5721   }
5722 
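// A minimal sketch (not HotSpot code) of the partitioning set up just above
// and consumed by do_young_space_rescan(): n recorded boundaries split a
// space [bottom, top) into n + 1 scan regions, which is why
// n_tasks = _eden_chunk_index + 1, and claimed task k simply maps to the
// k-th of those regions. The names Region and task_region are illustrative
// assumptions; the real code claims task indices atomically through
// SequentialSubTasksDone rather than computing them directly.
#include <cstddef>

struct Region { size_t start; size_t end; };   // [start, end), word addresses

// boundaries[0 .. n_boundaries) are sorted samples strictly inside
// [bottom, top); there are n_boundaries + 1 tasks in total.
Region task_region(size_t k, const size_t* boundaries, size_t n_boundaries,
                   size_t bottom, size_t top) {
  Region r;
  r.start = (k == 0)            ? bottom : boundaries[k - 1];
  r.end   = (k == n_boundaries) ? top    : boundaries[k];
  return r;
}

// Example: with boundaries {b0, b1} the three tasks cover
//   [bottom, b0), [b0, b1), [b1, top).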


6832     _span(span),
6833     _bitMap(bitMap)
6834 {
6835     assert(_ref_processor == NULL, "deliberately left NULL");
6836     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6837 }
6838 
6839 void MarkRefsIntoClosure::do_oop(oop obj) {
6840   // if p points into _span, then mark corresponding bit in _markBitMap
6841   assert(obj->is_oop(), "expected an oop");
6842   HeapWord* addr = (HeapWord*)obj;
6843   if (_span.contains(addr)) {
6844     // this should be made more efficient
6845     _bitMap->mark(addr);
6846   }
6847 }
6848 
6849 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
6850 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6851 
6852 Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
6853   MemRegion span, CMSBitMap* bitMap):
6854     _span(span),
6855     _bitMap(bitMap)
6856 {
6857     assert(_ref_processor == NULL, "deliberately left NULL");
6858     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6859 }
6860 
6861 void Par_MarkRefsIntoClosure::do_oop(oop obj) {
6862   // if p points into _span, then mark corresponding bit in _markBitMap
6863   assert(obj->is_oop(), "expected an oop");
6864   HeapWord* addr = (HeapWord*)obj;
6865   if (_span.contains(addr)) {
6866     // this should be made more efficient
6867     _bitMap->par_mark(addr);
6868   }
6869 }
6870 
6871 void Par_MarkRefsIntoClosure::do_oop(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
6872 void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6873 
6874 // A variant of the above, used for CMS marking verification.
6875 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6876   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6877     _span(span),
6878     _verification_bm(verification_bm),
6879     _cms_bm(cms_bm)
6880 {
6881     assert(_ref_processor == NULL, "deliberately left NULL");
6882     assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6883 }
6884 
6885 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6886   // if p points into _span, then mark corresponding bit in _markBitMap
6887   assert(obj->is_oop(), "expected an oop");
6888   HeapWord* addr = (HeapWord*)obj;
6889   if (_span.contains(addr)) {
6890     _verification_bm->mark(addr);
6891     if (!_cms_bm->isMarked(addr)) {
6892       oop(addr)->print();
6893       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);


9519       if (UsePerfData) {
9520         _space_counters->update_capacity();
9521         _gen_counters->update_all();
9522       }
9523 
9524       if (Verbose && PrintGCDetails) {
9525         size_t new_mem_size = _virtual_space.committed_size();
9526         size_t old_mem_size = new_mem_size + bytes;
9527         gclog_or_tty->print_cr("Shrinking %s from %ldK by %ldK to %ldK",
9528                       name(), old_mem_size/K, bytes/K, new_mem_size/K);
9529       }
9530     }
9531 
9532     assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9533       "Inconsistency at end of space");
9534     assert(chunk_at_end->end() == _cmsSpace->end(),
9535       "Shrinking is inconsistent");
9536     return;
9537   }
9538 }

9539 // Transfer some objects from the overflow list to the regular
9540 // marking stack. Return true if some objects were transferred.
9541 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9542   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9543                     (size_t)ParGCDesiredObjsFromOverflowList);
9544 
9545   bool res = _collector->take_from_overflow_list(num, _mark_stack);
9546   assert(_collector->overflow_list_is_empty() || res,
9547          "If list is not empty, we should have taken something");
9548   assert(!res || !_mark_stack->isEmpty(),
9549          "If we took something, it should now be on our stack");
9550   return res;
9551 }
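// A small sketch (not HotSpot code) of the sizing rule used above: take from
// the shared overflow list at most a quarter of the space left on the local
// mark stack, further capped by a tunable batch size, so one refill cannot
// come close to filling the stack again. The names overflow_batch_size and
// desired_batch are illustrative assumptions.
#include <algorithm>
#include <cstddef>

size_t overflow_batch_size(size_t stack_capacity, size_t stack_length,
                           size_t desired_batch) {
  size_t room = stack_capacity - stack_length;   // free slots on the stack
  return std::min(room / 4, desired_batch);      // mirrors the MIN2 above
}

// Example: a 4096-entry stack holding 96 entries with a desired batch of 1024
// yields min((4096 - 96) / 4, 1024) = 1000 objects per refill.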
9552 
9553 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9554   size_t res = _sp->block_size_no_stall(addr, _collector);
9555   if (_sp->block_is_obj(addr)) {
9556     if (_live_bit_map->isMarked(addr)) {
9557       // It can't have been dead in a previous cycle
9558       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");


9590                  false /* recordGCEndTime */,
9591                  false /* countCollection */  );
9592       break;
9593 
9594     case CMSCollector::Sweeping:
9595       initialize(true  /* fullGC */ ,
9596                  cause /* cause of the GC */,
9597                  false /* recordGCBeginTime */,
9598                  false /* recordPreGCUsage */,
9599                  true  /* recordPeakUsage */,
9600                  true  /* recordPostGCusage */,
9601                  false /* recordAccumulatedGCTime */,
9602                  true  /* recordGCEndTime */,
9603                  true  /* countCollection */  );
9604       break;
9605 
9606     default:
9607       ShouldNotReachHere();
9608   }
9609 }