src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp

1536   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1537   // Temporarily, clear the "is_alive_non_header" field of the
1538   // reference processor.
1539   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1540   // Temporarily make reference _processing_ single threaded (non-MT).
1541   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1542   // Temporarily make refs discovery atomic
1543   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1544   // Temporarily make reference _discovery_ single threaded (non-MT)
1545   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1546 
1547   ref_processor()->set_enqueuing_is_done(false);
1548   ref_processor()->enable_discovery();
1549   ref_processor()->setup_policy(clear_all_soft_refs);
1550   // If an asynchronous collection finishes, the _modUnionTable is
1551   // all clear.  If we are taking over a collection that was begun
1552   // asynchronously, clear the _modUnionTable.
1553   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1554     "_modUnionTable should be clear if the baton was not passed");
1555   _modUnionTable.clear_all();
1556   assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
1557     "mod union for klasses should be clear if the baton was not passed");
1558   _ct->klass_rem_set()->clear_mod_union();
1559 
1560   // We must adjust the allocation statistics being maintained
1561   // in the free list space. We do so by reading and clearing
1562   // the sweep timer and updating the block flux rate estimates below.
1563   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1564   if (_inter_sweep_timer.is_active()) {
1565     _inter_sweep_timer.stop();
1566     // Note that we do not use this sample to update the _inter_sweep_estimate.
1567     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1568                                             _inter_sweep_estimate.padded_average(),
1569                                             _intra_sweep_estimate.padded_average());
1570   }
1571 
1572   GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
1573   #ifdef ASSERT
1574     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1575     size_t free_size = cms_space->free();
1576     assert(free_size ==
1577            pointer_delta(cms_space->end(), cms_space->compaction_top())
1578            * HeapWordSize,


2008     return;
2009   }
2010 
2011   // set a bit saying prologue has been called; cleared in epilogue
2012   _between_prologue_and_epilogue = true;
2013   // Claim locks for common data structures, then call gc_prologue_work()
2014   // for each CMSGen.
2015 
2016   getFreelistLocks();   // gets free list locks on constituent spaces
2017   bitMapLock()->lock_without_safepoint_check();
2018 
2019   // Should call gc_prologue_work() for all cms gens we are responsible for
2020   bool duringMarking =    _collectorState >= Marking
2021                          && _collectorState < Sweeping;
2022 
2023   // The young collections clear the modified oops state, which tells if
2024   // there are any modified oops in the class. The remark phase also needs
2025   // that information. Tell the young collection to save the union of all
2026   // modified klasses.
2027   if (duringMarking) {
2028     _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2029   }
2030 
2031   bool registerClosure = duringMarking;
2032 
2033   _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2034 
2035   if (!full) {
2036     stats().record_gc0_begin();
2037   }
2038 }
2039 
2040 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2041 
2042   _capacity_at_prologue = capacity();
2043   _used_at_prologue = used();
2044 
2045   // We enable promotion tracking so that card-scanning can recognize
2046   // which objects have been promoted during this GC and skip them.
2047   for (uint i = 0; i < ParallelGCThreads; i++) {
2048     _par_gc_thread_states[i]->promo.startTrackingPromotions();


2084          "world is stopped assumption");
2085 
2086   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2087   // if linear allocation blocks need to be appropriately marked to allow the
2088   // blocks to be parsable. We also check here whether we need to nudge the
2089   // CMS collector thread to start a new cycle (if it's not already active).
2090   assert(   Thread::current()->is_VM_thread()
2091          || (   CMSScavengeBeforeRemark
2092              && Thread::current()->is_ConcurrentGC_thread()),
2093          "Incorrect thread type for epilogue execution");
2094 
2095   if (!_between_prologue_and_epilogue) {
2096     // We have already been invoked; this is a gc_epilogue delegation
2097     // from yet another CMS generation that we are responsible for, just
2098     // ignore it since all relevant work has already been done.
2099     return;
2100   }
2101   assert(haveFreelistLocks(), "must have freelist locks");
2102   assert_lock_strong(bitMapLock());
2103 
2104   _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2105 
2106   _cmsGen->gc_epilogue_work(full);
2107 
2108   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2109     // in case sampling was not already enabled, enable it
2110     _start_sampling = true;
2111   }
2112   // reset _eden_chunk_array so sampling starts afresh
2113   _eden_chunk_index = 0;
2114 
2115   size_t cms_used   = _cmsGen->cmsSpace()->used();
2116 
2117   // update performance counters - this uses a special version of
2118   // update_counters() that allows the utilization to be passed as a
2119   // parameter, avoiding multiple calls to used().
2120   //
2121   _cmsGen->update_counters(cms_used);
2122 
2123   bitMapLock()->unlock();
2124   releaseFreelistLocks();


2363     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2364   }
2365   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2366   verify_work_stacks_empty();
2367 
2368   // Marking completed -- now verify that each bit marked in
2369   // verification_mark_bm() is also marked in markBitMap(); flag all
2370   // errors by printing corresponding objects.
2371   VerifyMarkedClosure vcl(markBitMap());
2372   verification_mark_bm()->iterate(&vcl);
2373   if (vcl.failed()) {
2374     Log(gc, verify) log;
2375     log.error("Failed marking verification after remark");
2376     ResourceMark rm;
2377     LogStream ls(log.error());
2378     gch->print_on(&ls);
2379     fatal("CMS: failed marking verification after remark");
2380   }
2381 }
2382 
2383 class VerifyKlassOopsKlassClosure : public KlassClosure {
2384   class VerifyKlassOopsClosure : public OopClosure {
2385     CMSBitMap* _bitmap;
2386    public:
2387     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2388     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2389     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2390   } _oop_closure;
2391  public:
2392   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2393   void do_klass(Klass* k) {
2394     k->oops_do(&_oop_closure);
2395   }
2396 };
2397 
2398 void CMSCollector::verify_after_remark_work_2() {
2399   ResourceMark rm;
2400   HandleMark  hm;
2401   GenCollectedHeap* gch = GenCollectedHeap::heap();
2402 
2403   // Get a clear set of claim bits for the roots processing to work with.
2404   ClassLoaderDataGraph::clear_claimed_marks();
2405 
2406   // Mark from roots one level into CMS
2407   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2408                                      markBitMap());
2409   CLDToOopClosure cld_closure(&notOlder, true);
2410 
2411   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2412 
2413   {
2414     StrongRootsScope srs(1);


2420                            &notOlder,
2421                            &cld_closure);
2422   }
2423 
2424   // Now mark from the roots
2425   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2426     verification_mark_bm(), markBitMap(), verification_mark_stack());
2427   assert(_restart_addr == NULL, "Expected pre-condition");
2428   verification_mark_bm()->iterate(&markFromRootsClosure);
2429   while (_restart_addr != NULL) {
2430     // Deal with stack overflow by restarting at the indicated
2431     // address.
2432     HeapWord* ra = _restart_addr;
2433     markFromRootsClosure.reset(ra);
2434     _restart_addr = NULL;
2435     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2436   }
2437   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2438   verify_work_stacks_empty();
2439 
2440   VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
2441   ClassLoaderDataGraph::classes_do(&verify_klass_oops);
2442 
2443   // Marking completed -- now verify that each bit marked in
2444   // verification_mark_bm() is also marked in markBitMap(); flag all
2445   // errors by printing corresponding objects.
2446   VerifyMarkedClosure vcl(markBitMap());
2447   verification_mark_bm()->iterate(&vcl);
2448   assert(!vcl.failed(), "Else verification above should not have succeeded");
2449 }
2450 
2451 void ConcurrentMarkSweepGeneration::save_marks() {
2452   // delegate to CMS space
2453   cmsSpace()->save_marks();
2454 }
2455 
2456 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2457   return cmsSpace()->no_allocs_since_save_marks();
2458 }
2459 
2460 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
2461                                                                 \


2894 
2895       StrongRootsScope srs(1);
2896 
2897       gch->cms_process_roots(&srs,
2898                              true,   // young gen as roots
2899                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
2900                              should_unload_classes(),
2901                              &notOlder,
2902                              &cld_closure);
2903     }
2904   }
2905 
2906   // Clear mod-union table; it will be dirtied in the prologue of
2907   // CMS generation per each young generation collection.
2908 
2909   assert(_modUnionTable.isAllClear(),
2910        "Was cleared in most recent final checkpoint phase"
2911        " or no bits are set in the gc_prologue before the start of the "
2912        "next marking phase.");
2913 
2914   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
2915 
2916   // Save the end of the used_region of the constituent generations
2917   // to be used to limit the extent of sweep in each generation.
2918   save_sweep_limits();
2919   verify_overflow_empty();
2920 }
2921 
2922 bool CMSCollector::markFromRoots() {
2923   // we might be tempted to assert that:
2924   // assert(!SafepointSynchronize::is_at_safepoint(),
2925   //        "inconsistent argument?");
2926   // However that wouldn't be right, because it's possible that
2927   // a safepoint is indeed in progress as a young generation
2928   // stop-the-world GC happens even as we mark in this generation.
2929   assert(_collectorState == Marking, "inconsistent state?");
2930   check_correct_thread_executing();
2931   verify_overflow_empty();
2932 
2933   // Weak ref discovery note: We may be discovering weak
2934   // refs in this generation concurrent (but interleaved) with


3831   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3832        numIter < CMSPrecleanIter;
3833        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3834     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
3835     log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3836     // Either there are very few dirty cards, so re-mark
3837     // pause will be small anyway, or our pre-cleaning isn't
3838     // that much faster than the rate at which cards are being
3839     // dirtied, so we might as well stop and re-mark since
3840     // precleaning won't improve our re-mark time by much.
3841     if (curNumCards <= CMSPrecleanThreshold ||
3842         (numIter > 0 &&
3843          (curNumCards * CMSPrecleanDenominator >
3844          lastNumCards * CMSPrecleanNumerator))) {
3845       numIter++;
3846       cumNumCards += curNumCards;
3847       break;
3848     }
3849   }
3850 
3851   preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
3852 
3853   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
3854   cumNumCards += curNumCards;
3855   log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
3856                              curNumCards, cumNumCards, numIter);
3857   return cumNumCards;   // as a measure of useful work done
3858 }
3859 
3860 // PRECLEANING NOTES:
3861 // Precleaning involves:
3862 // . reading the bits of the modUnionTable and clearing the set bits.
3863 // . For the cards corresponding to the set bits, we scan the
3864 //   objects on those cards. This means we need the free_list_lock
3865 //   so that we can safely iterate over the CMS space when scanning
3866 //   for oops.
3867 // . When we scan the objects, we'll be both reading and setting
3868 //   marks in the marking bit map, so we'll need the marking bit map.
3869 // . For protecting _collector_state transitions, we take the CGC_lock.
3870   //   Note that any races in the reading of card table entries by the
3871 //   CMS thread on the one hand and the clearing of those entries by the


4050       if (stop_point != NULL) {
4051         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4052                "Should only be AbortablePreclean.");
4053         _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4054         if (should_abort_preclean()) {
4055           break; // out of preclean loop
4056         } else {
4057           // Compute the next address at which preclean should pick up.
4058           lastAddr = next_card_start_after_block(stop_point);
4059         }
4060       }
4061     } else {
4062       break;
4063     }
4064   }
4065   verify_work_stacks_empty();
4066   verify_overflow_empty();
4067   return cumNumDirtyCards;
4068 }
4069 
4070 class PrecleanKlassClosure : public KlassClosure {
4071   KlassToOopClosure _cm_klass_closure;
4072  public:
4073   PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4074   void do_klass(Klass* k) {
4075     if (k->has_accumulated_modified_oops()) {
4076       k->clear_accumulated_modified_oops();
4077 
4078       _cm_klass_closure.do_klass(k);
4079     }
4080   }
4081 };
4082 
4083 // The freelist lock is needed to prevent asserts; is it really needed?
4084 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4085 
4086   cl->set_freelistLock(freelistLock);
4087 
4088   CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4089 
4090   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4091   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4092   PrecleanKlassClosure preclean_klass_closure(cl);
4093   ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4094 
4095   verify_work_stacks_empty();
4096   verify_overflow_empty();
4097 }
4098 
4099 void CMSCollector::checkpointRootsFinal() {
4100   assert(_collectorState == FinalMarking, "incorrect state transition?");
4101   check_correct_thread_executing();
4102   // world is stopped at this checkpoint
4103   assert(SafepointSynchronize::is_at_safepoint(),
4104          "world should be stopped");
4105   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
4106 
4107   verify_work_stacks_empty();
4108   verify_overflow_empty();
4109 
4110   log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4111                 _young_gen->used() / K, _young_gen->capacity() / K);
4112   {
4113     if (CMSScavengeBeforeRemark) {


4233    }
4234    if (_markStack._failed_double > 0) {
4235      log_trace(gc)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT,
4236                           _markStack._failed_double, _markStack.capacity());
4237    }
4238   _markStack._hit_limit = 0;
4239   _markStack._failed_double = 0;
4240 
4241   if ((VerifyAfterGC || VerifyDuringGC) &&
4242       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4243     verify_after_remark();
4244   }
4245 
4246   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4247 
4248   // Change under the freelistLocks.
4249   _collectorState = Sweeping;
4250   // Call isAllClear() under bitMapLock
4251   assert(_modUnionTable.isAllClear(),
4252       "Should be clear by end of the final marking");
4253   assert(_ct->klass_rem_set()->mod_union_is_clear(),
4254       "Should be clear by end of the final marking");
4255 }
4256 
4257 void CMSParInitialMarkTask::work(uint worker_id) {
4258   elapsedTimer _timer;
4259   ResourceMark rm;
4260   HandleMark   hm;
4261 
4262   // ---------- scan from roots --------------
4263   _timer.start();
4264   GenCollectedHeap* gch = GenCollectedHeap::heap();
4265   ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4266 
4267   // ---------- young gen roots --------------
4268   {
4269     work_on_young_gen_roots(&par_mri_cl);
4270     _timer.stop();
4271     log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4272   }
4273 


4315     _strong_roots_scope(strong_roots_scope) { }
4316 
4317   OopTaskQueueSet* task_queues() { return _task_queues; }
4318 
4319   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4320 
4321   ParallelTaskTerminator* terminator() { return &_term; }
4322   uint n_workers() { return _n_workers; }
4323 
4324   void work(uint worker_id);
4325 
4326  private:
4327   // ... of  dirty cards in old space
4328   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4329                                   ParMarkRefsIntoAndScanClosure* cl);
4330 
4331   // ... work stealing for the above
4332   void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
4333 };
4334 
4335 class RemarkKlassClosure : public KlassClosure {
4336   KlassToOopClosure _cm_klass_closure;
4337  public:
4338   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4339   void do_klass(Klass* k) {
4340     // Check if we have modified any oops in the Klass during the concurrent marking.
4341     if (k->has_accumulated_modified_oops()) {
4342       k->clear_accumulated_modified_oops();
4343 
4344     // We could have transferred the current modified marks to the accumulated marks,
4345       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
4346     } else if (k->has_modified_oops()) {
4347       // Don't clear anything, this info is needed by the next young collection.
4348     } else {
4349       // No modified oops in the Klass.
4350       return;
4351     }
4352 
4353     // The klass has modified fields; we need to scan it.
4354     _cm_klass_closure.do_klass(k);
4355   }
4356 };
4357 
4358 void CMSParMarkTask::work_on_young_gen_roots(OopsInGenClosure* cl) {
4359   ParNewGeneration* young_gen = _collector->_young_gen;
4360   ContiguousSpace* eden_space = young_gen->eden();
4361   ContiguousSpace* from_space = young_gen->from();
4362   ContiguousSpace* to_space   = young_gen->to();
4363 
4364   HeapWord** eca = _collector->_eden_chunk_array;
4365   size_t     ect = _collector->_eden_chunk_index;
4366   HeapWord** sca = _collector->_survivor_chunk_array;
4367   size_t     sct = _collector->_survivor_chunk_index;
4368 
4369   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4370   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4371 
4372   do_young_space_rescan(cl, to_space, NULL, 0);
4373   do_young_space_rescan(cl, from_space, sca, sct);
4374   do_young_space_rescan(cl, eden_space, eca, ect);


4422   // ---------- unhandled CLD scanning ----------
4423   if (worker_id == 0) { // Single threaded at the moment.
4424     _timer.reset();
4425     _timer.start();
4426 
4427     // Scan all new class loader data objects and new dependencies that were
4428     // introduced during concurrent marking.
4429     ResourceMark rm;
4430     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4431     for (int i = 0; i < array->length(); i++) {
4432       par_mrias_cl.do_cld_nv(array->at(i));
4433     }
4434 
4435     // We don't need to keep track of new CLDs anymore.
4436     ClassLoaderDataGraph::remember_new_clds(false);
4437 
4438     _timer.stop();
4439     log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4440   }
4441 
4442   // ---------- dirty klass scanning ----------
4443   if (worker_id == 0) { // Single threaded at the moment.
4444     _timer.reset();
4445     _timer.start();
4446 
4447     // Scan all classes that were dirtied during the concurrent marking phase.
4448     RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
4449     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4450 
4451     _timer.stop();
4452     log_trace(gc, task)("Finished dirty klass scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4453   }
4454 
4455   // We might have added oops to ClassLoaderData::_handles during the
4456   // concurrent marking phase. These oops point to newly allocated objects
4458   // that are guaranteed to be kept alive, either by the direct allocation
4459   // code or when the young collector processes the roots. Hence,
4459   // we don't have to revisit the _handles block during the remark phase.
4460 
4461   // ---------- rescan dirty cards ------------
4462   _timer.reset();
4463   _timer.start();
4464 
4465   // Do the rescan tasks for the CMS space
4466   // (cms_space).
4467   // "worker_id" is passed to select the task_queue for "worker_id"
4468   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4469   _timer.stop();
4470   log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4471 
4472   // ---------- steal work from other threads ...
4473   // ---------- ... and drain overflow list.
4474   _timer.reset();
4475   _timer.start();
4476   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4477   _timer.stop();
4478   log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4479 }


4964 
4965   {
4966     GCTraceTime(Trace, gc, phases) t("Visit Unhandled CLDs", _gc_timer_cm);
4967 
4968     verify_work_stacks_empty();
4969 
4970     // Scan all class loader data objects that might have been introduced
4971     // during concurrent marking.
4972     ResourceMark rm;
4973     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4974     for (int i = 0; i < array->length(); i++) {
4975       mrias_cl.do_cld_nv(array->at(i));
4976     }
4977 
4978     // We don't need to keep track of new CLDs anymore.
4979     ClassLoaderDataGraph::remember_new_clds(false);
4980 
4981     verify_work_stacks_empty();
4982   }
4983 
4984   {
4985     GCTraceTime(Trace, gc, phases) t("Dirty Klass Scan", _gc_timer_cm);
4986 
4987     verify_work_stacks_empty();
4988 
4989     RemarkKlassClosure remark_klass_closure(&mrias_cl);
4990     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4991 
4992     verify_work_stacks_empty();
4993   }
4994 
4995   // We might have added oops to ClassLoaderData::_handles during the
4996   // concurrent marking phase. These oops point to newly allocated objects
4997   // that are guaranteed to be kept alive, either by the direct allocation
4998   // code or when the young collector processes the roots. Hence,
4999   // we don't have to revisit the _handles block during the remark phase.
5000 
5001   verify_work_stacks_empty();
5002   // Restore evacuated mark words, if any, used for overflow list links
5003   restore_preserved_marks_if_any();
5004 
5005   verify_overflow_empty();
5006 }
5007 
5008 ////////////////////////////////////////////////////////
5009 // Parallel Reference Processing Task Proxy Class
5010 ////////////////////////////////////////////////////////
5011 class AbstractGangTaskWOopQueues : public AbstractGangTask {
5012   OopTaskQueueSet*       _queues;
5013   ParallelTaskTerminator _terminator;
5014  public:
5015   AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
5016     AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
5017   ParallelTaskTerminator* terminator() { return &_terminator; }
5018   OopTaskQueueSet* queues() { return _queues; }
5019 };


src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp

1536   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1537   // Temporarily, clear the "is_alive_non_header" field of the
1538   // reference processor.
1539   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1540   // Temporarily make reference _processing_ single threaded (non-MT).
1541   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1542   // Temporarily make refs discovery atomic
1543   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1544   // Temporarily make reference _discovery_ single threaded (non-MT)
1545   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
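
The ReferenceProcessor*Mutator guards above are scoped save/restore helpers: each constructor records the processor's current setting and installs a temporary one, and the destructor reinstates the saved value when the guard goes out of scope. A minimal sketch of the idiom, assuming ReferenceProcessor's processing_is_mt()/set_mt_processing() accessor pair:

  class ScopedMTProcessingMutator {
    ReferenceProcessor* _rp;
    bool                _saved_mt;
   public:
    ScopedMTProcessingMutator(ReferenceProcessor* rp, bool mt) :
        _rp(rp), _saved_mt(rp->processing_is_mt()) {
      _rp->set_mt_processing(mt);        // install the temporary setting
    }
    ~ScopedMTProcessingMutator() {
      _rp->set_mt_processing(_saved_mt); // restore the original on scope exit
    }
  };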
1546 
1547   ref_processor()->set_enqueuing_is_done(false);
1548   ref_processor()->enable_discovery();
1549   ref_processor()->setup_policy(clear_all_soft_refs);
1550   // If an asynchronous collection finishes, the _modUnionTable is
1551   // all clear.  If we are taking over a collection that was begun
1552   // asynchronously, clear the _modUnionTable.
1553   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1554     "_modUnionTable should be clear if the baton was not passed");
1555   _modUnionTable.clear_all();
1556   assert(_collectorState != Idling || _ct->cld_rem_set()->mod_union_is_clear(),
1557     "mod union for klasses should be clear if the baton was not passed");
1558   _ct->cld_rem_set()->clear_mod_union();
1559 
1560 
1561   // We must adjust the allocation statistics being maintained
1562   // in the free list space. We do so by reading and clearing
1563   // the sweep timer and updating the block flux rate estimates below.
1564   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1565   if (_inter_sweep_timer.is_active()) {
1566     _inter_sweep_timer.stop();
1567     // Note that we do not use this sample to update the _inter_sweep_estimate.
1568     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1569                                             _inter_sweep_estimate.padded_average(),
1570                                             _intra_sweep_estimate.padded_average());
1571   }
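
The padded_average() calls above come from the adaptive-average bookkeeping behind the sweep flux estimates: each sample feeds an exponentially weighted average plus a deviation term, and the padded value adds a safety margin of a few deviations. A rough sketch, where weight and padding stand in for the tuning parameters (illustrative names, not the exact HotSpot fields):

  // avg and dev are running estimates; sample is the new measurement.
  avg = (1.0f - weight) * avg + weight * sample;
  dev = (1.0f - weight) * dev + weight * fabsf(sample - avg);
  // padded_average() then reports avg + padding * dev, so callers plan
  // against a pessimistic rate rather than the mean alone.
  float padded = avg + padding * dev;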
1572 
1573   GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
1574   #ifdef ASSERT
1575     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1576     size_t free_size = cms_space->free();
1577     assert(free_size ==
1578            pointer_delta(cms_space->end(), cms_space->compaction_top())
1579            * HeapWordSize,


2009     return;
2010   }
2011 
2012   // set a bit saying prologue has been called; cleared in epilogue
2013   _between_prologue_and_epilogue = true;
2014   // Claim locks for common data structures, then call gc_prologue_work()
2015   // for each CMSGen.
2016 
2017   getFreelistLocks();   // gets free list locks on constituent spaces
2018   bitMapLock()->lock_without_safepoint_check();
2019 
2020   // Should call gc_prologue_work() for all cms gens we are responsible for
2021   bool duringMarking =    _collectorState >= Marking
2022                          && _collectorState < Sweeping;
2023 
2024   // The young collections clear the modified oops state, which tells if
2025   // there are any modified oops in a ClassLoaderData. The remark phase also
2026   // needs that information. Tell the young collection to save the union of
2027   // all modified CLDs.
2028   if (duringMarking) {
2029     _ct->cld_rem_set()->set_accumulate_modified_oops(true);
2030   }
2031 
2032   bool registerClosure = duringMarking;
2033 
2034   _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2035 
2036   if (!full) {
2037     stats().record_gc0_begin();
2038   }
2039 }
2040 
2041 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2042 
2043   _capacity_at_prologue = capacity();
2044   _used_at_prologue = used();
2045 
2046   // We enable promotion tracking so that card-scanning can recognize
2047   // which objects have been promoted during this GC and skip them.
2048   for (uint i = 0; i < ParallelGCThreads; i++) {
2049     _par_gc_thread_states[i]->promo.startTrackingPromotions();


2085          "world is stopped assumption");
2086 
2087   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2088   // if linear allocation blocks need to be appropriately marked to allow the
2089   // blocks to be parsable. We also check here whether we need to nudge the
2090   // CMS collector thread to start a new cycle (if it's not already active).
2091   assert(   Thread::current()->is_VM_thread()
2092          || (   CMSScavengeBeforeRemark
2093              && Thread::current()->is_ConcurrentGC_thread()),
2094          "Incorrect thread type for epilogue execution");
2095 
2096   if (!_between_prologue_and_epilogue) {
2097     // We have already been invoked; this is a gc_epilogue delegation
2098     // from yet another CMS generation that we are responsible for, just
2099     // ignore it since all relevant work has already been done.
2100     return;
2101   }
2102   assert(haveFreelistLocks(), "must have freelist locks");
2103   assert_lock_strong(bitMapLock());
2104 
2105   _ct->cld_rem_set()->set_accumulate_modified_oops(false);
2106 
2107   _cmsGen->gc_epilogue_work(full);
2108 
2109   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2110     // in case sampling was not already enabled, enable it
2111     _start_sampling = true;
2112   }
2113   // reset _eden_chunk_array so sampling starts afresh
2114   _eden_chunk_index = 0;
2115 
2116   size_t cms_used   = _cmsGen->cmsSpace()->used();
2117 
2118   // update performance counters - this uses a special version of
2119   // update_counters() that allows the utilization to be passed as a
2120   // parameter, avoiding multiple calls to used().
2121   //
2122   _cmsGen->update_counters(cms_used);
2123 
2124   bitMapLock()->unlock();
2125   releaseFreelistLocks();
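
gc_prologue turned on _ct->cld_rem_set()->set_accumulate_modified_oops(true) for the marking phases, and the epilogue above turns it off. The idea: while accumulating, a young collection folds each per-cycle "modified" flag into a sticky "accumulated" flag before clearing it, so the remark phase still sees every CLD dirtied since marking began. A hypothetical sketch of that fold (record_accumulated_modified_oops() is an assumed accessor, named here only for illustration):

  void clear_modified_for_young_gc(ClassLoaderData* cld, bool accumulate) {
    if (accumulate && cld->has_modified_oops()) {
      cld->record_accumulated_modified_oops(); // assumed accessor: sticky bit
    }
    cld->clear_modified_oops();                // young GC resets per-cycle flag
  }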


2364     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2365   }
2366   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2367   verify_work_stacks_empty();
2368 
2369   // Marking completed -- now verify that each bit marked in
2370   // verification_mark_bm() is also marked in markBitMap(); flag all
2371   // errors by printing corresponding objects.
2372   VerifyMarkedClosure vcl(markBitMap());
2373   verification_mark_bm()->iterate(&vcl);
2374   if (vcl.failed()) {
2375     Log(gc, verify) log;
2376     log.error("Failed marking verification after remark");
2377     ResourceMark rm;
2378     LogStream ls(log.error());
2379     gch->print_on(&ls);
2380     fatal("CMS: failed marking verification after remark");
2381   }
2382 }
2383 
2384 class VerifyCLDOopsCLDClosure : public CLDClosure {
2385   class VerifyCLDOopsClosure : public OopClosure {
2386     CMSBitMap* _bitmap;
2387    public:
2388     VerifyCLDOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2389     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2390     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2391   } _oop_closure;
2392  public:
2393   VerifyCLDOopsCLDClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2394   void do_cld(ClassLoaderData* cld) {
2395     cld->oops_do(&_oop_closure, false, false);
2396   }
2397 };
2398 
2399 void CMSCollector::verify_after_remark_work_2() {
2400   ResourceMark rm;
2401   HandleMark  hm;
2402   GenCollectedHeap* gch = GenCollectedHeap::heap();
2403 
2404   // Get a clear set of claim bits for the roots processing to work with.
2405   ClassLoaderDataGraph::clear_claimed_marks();
2406 
2407   // Mark from roots one level into CMS
2408   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2409                                      markBitMap());
2410   CLDToOopClosure cld_closure(&notOlder, true);
2411 
2412   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2413 
2414   {
2415     StrongRootsScope srs(1);


2421                            &notOlder,
2422                            &cld_closure);
2423   }
2424 
2425   // Now mark from the roots
2426   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2427     verification_mark_bm(), markBitMap(), verification_mark_stack());
2428   assert(_restart_addr == NULL, "Expected pre-condition");
2429   verification_mark_bm()->iterate(&markFromRootsClosure);
2430   while (_restart_addr != NULL) {
2431     // Deal with stack overflow by restarting at the indicated
2432     // address.
2433     HeapWord* ra = _restart_addr;
2434     markFromRootsClosure.reset(ra);
2435     _restart_addr = NULL;
2436     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2437   }
2438   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2439   verify_work_stacks_empty();
2440 
2441   VerifyCLDOopsCLDClosure verify_cld_oops(verification_mark_bm());
2442   ClassLoaderDataGraph::cld_do(&verify_cld_oops);
2443 
2444   // Marking completed -- now verify that each bit marked in
2445   // verification_mark_bm() is also marked in markBitMap(); flag all
2446   // errors by printing corresponding objects.
2447   VerifyMarkedClosure vcl(markBitMap());
2448   verification_mark_bm()->iterate(&vcl);
2449   assert(!vcl.failed(), "Else verification above should not have succeeded");
2450 }
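
VerifyMarkedClosure, used above but defined elsewhere, checks a subset property: every bit set in the freshly built verification bitmap must also be set in the collector's markBitMap(), i.e. concurrent marking may conservatively over-mark but must never miss an object the verification pass reached. A sketch of such a check, assuming BitMapClosure::do_bit(offset) and CMSBitMap's offsetToHeapWord()/isMarked():

  class SubsetCheckClosure : public BitMapClosure {
    CMSBitMap* _verify;   // bitmap being iterated (source of the offsets)
    CMSBitMap* _real;     // the collector's markBitMap()
    bool       _failed;
   public:
    SubsetCheckClosure(CMSBitMap* verify, CMSBitMap* real) :
        _verify(verify), _real(real), _failed(false) {}
    bool do_bit(size_t offset) {
      HeapWord* addr = _verify->offsetToHeapWord(offset);
      if (!_real->isMarked(addr)) {
        _failed = true;   // an object the real marking missed
      }
      return true;        // keep scanning the remaining bits
    }
    bool failed() const { return _failed; }
  };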
2451 
2452 void ConcurrentMarkSweepGeneration::save_marks() {
2453   // delegate to CMS space
2454   cmsSpace()->save_marks();
2455 }
2456 
2457 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2458   return cmsSpace()->no_allocs_since_save_marks();
2459 }
2460 
2461 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
2462                                                                 \


2895 
2896       StrongRootsScope srs(1);
2897 
2898       gch->cms_process_roots(&srs,
2899                              true,   // young gen as roots
2900                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
2901                              should_unload_classes(),
2902                              &notOlder,
2903                              &cld_closure);
2904     }
2905   }
2906 
2907   // Clear mod-union table; it will be dirtied in the prologue of
2908   // CMS generation per each young generation collection.
2909 
2910   assert(_modUnionTable.isAllClear(),
2911        "Was cleared in most recent final checkpoint phase"
2912        " or no bits are set in the gc_prologue before the start of the "
2913        "next marking phase.");
2914 
2915   assert(_ct->cld_rem_set()->mod_union_is_clear(), "Must be");
2916 
2917   // Save the end of the used_region of the constituent generations
2918   // to be used to limit the extent of sweep in each generation.
2919   save_sweep_limits();
2920   verify_overflow_empty();
2921 }
2922 
2923 bool CMSCollector::markFromRoots() {
2924   // we might be tempted to assert that:
2925   // assert(!SafepointSynchronize::is_at_safepoint(),
2926   //        "inconsistent argument?");
2927   // However that wouldn't be right, because it's possible that
2928   // a safepoint is indeed in progress as a young generation
2929   // stop-the-world GC happens even as we mark in this generation.
2930   assert(_collectorState == Marking, "inconsistent state?");
2931   check_correct_thread_executing();
2932   verify_overflow_empty();
2933 
2934   // Weak ref discovery note: We may be discovering weak
2935   // refs in this generation concurrent (but interleaved) with


3832   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3833        numIter < CMSPrecleanIter;
3834        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3835     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
3836     log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3837     // Either there are very few dirty cards, so re-mark
3838     // pause will be small anyway, or our pre-cleaning isn't
3839     // that much faster than the rate at which cards are being
3840     // dirtied, so we might as well stop and re-mark since
3841     // precleaning won't improve our re-mark time by much.
3842     if (curNumCards <= CMSPrecleanThreshold ||
3843         (numIter > 0 &&
3844          (curNumCards * CMSPrecleanDenominator >
3845          lastNumCards * CMSPrecleanNumerator))) {
3846       numIter++;
3847       cumNumCards += curNumCards;
3848       break;
3849     }
3850   }
3851 
3852   preclean_cld(&mrias_cl, _cmsGen->freelistLock());
3853 
3854   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
3855   cumNumCards += curNumCards;
3856   log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
3857                              curNumCards, cumNumCards, numIter);
3858   return cumNumCards;   // as a measure of useful work done
3859 }
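
To make the loop's exit test concrete: with the default tuning (CMSPrecleanThreshold = 1000, CMSPrecleanNumerator = 2, CMSPrecleanDenominator = 3), an iteration stops precleaning either when it found at most 1000 dirty cards, or when curNumCards * 3 > lastNumCards * 2, i.e. when the dirty-card count failed to drop below two thirds of the previous pass. For example, lastNumCards = 6000 and curNumCards = 4500 gives 4500 * 3 = 13500 > 6000 * 2 = 12000, so precleaning is judged to no longer pay for itself and the loop exits.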
3860 
3861 // PRECLEANING NOTES:
3862 // Precleaning involves:
3863 // . reading the bits of the modUnionTable and clearing the set bits.
3864 // . For the cards corresponding to the set bits, we scan the
3865 //   objects on those cards. This means we need the free_list_lock
3866 //   so that we can safely iterate over the CMS space when scanning
3867 //   for oops.
3868 // . When we scan the objects, we'll be both reading and setting
3869 //   marks in the marking bit map, so we'll need the marking bit map.
3870 // . For protecting _collector_state transitions, we take the CGC_lock.
3871   //   Note that any races in the reading of card table entries by the
3872 //   CMS thread on the one hand and the clearing of those entries by the


4051       if (stop_point != NULL) {
4052         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4053                "Should only be AbortablePreclean.");
4054         _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4055         if (should_abort_preclean()) {
4056           break; // out of preclean loop
4057         } else {
4058           // Compute the next address at which preclean should pick up.
4059           lastAddr = next_card_start_after_block(stop_point);
4060         }
4061       }
4062     } else {
4063       break;
4064     }
4065   }
4066   verify_work_stacks_empty();
4067   verify_overflow_empty();
4068   return cumNumDirtyCards;
4069 }
4070 
4071 class PrecleanCLDClosure : public CLDClosure {
4072   MetadataAwareOopsInGenClosure* _cm_closure;
4073  public:
4074   PrecleanCLDClosure(MetadataAwareOopsInGenClosure* oop_closure) : _cm_closure(oop_closure) {}
4075   void do_cld(ClassLoaderData* cld) {
4076     if (cld->has_accumulated_modified_oops()) {
4077       cld->clear_accumulated_modified_oops();
4078 
4079       _cm_closure->do_cld(cld);
4080     }
4081   }
4082 };
4083 
4084 // The freelist lock is needed to prevent asserts; is it really needed?
4085 void CMSCollector::preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4086 
4087   cl->set_freelistLock(freelistLock);
4088 
4089   CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4090 
4091   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4092   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4093   PrecleanCLDClosure preclean_closure(cl);
4094   ClassLoaderDataGraph::cld_do(&preclean_closure);
4095 
4096   verify_work_stacks_empty();
4097   verify_overflow_empty();
4098 }
4099 
4100 void CMSCollector::checkpointRootsFinal() {
4101   assert(_collectorState == FinalMarking, "incorrect state transition?");
4102   check_correct_thread_executing();
4103   // world is stopped at this checkpoint
4104   assert(SafepointSynchronize::is_at_safepoint(),
4105          "world should be stopped");
4106   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
4107 
4108   verify_work_stacks_empty();
4109   verify_overflow_empty();
4110 
4111   log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4112                 _young_gen->used() / K, _young_gen->capacity() / K);
4113   {
4114     if (CMSScavengeBeforeRemark) {


4234    }
4235    if (_markStack._failed_double > 0) {
4236      log_trace(gc)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT,
4237                           _markStack._failed_double, _markStack.capacity());
4238    }
4239   _markStack._hit_limit = 0;
4240   _markStack._failed_double = 0;
4241 
4242   if ((VerifyAfterGC || VerifyDuringGC) &&
4243       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4244     verify_after_remark();
4245   }
4246 
4247   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4248 
4249   // Change under the freelistLocks.
4250   _collectorState = Sweeping;
4251   // Call isAllClear() under bitMapLock
4252   assert(_modUnionTable.isAllClear(),
4253       "Should be clear by end of the final marking");
4254   assert(_ct->cld_rem_set()->mod_union_is_clear(),
4255       "Should be clear by end of the final marking");
4256 }
4257 
4258 void CMSParInitialMarkTask::work(uint worker_id) {
4259   elapsedTimer _timer;
4260   ResourceMark rm;
4261   HandleMark   hm;
4262 
4263   // ---------- scan from roots --------------
4264   _timer.start();
4265   GenCollectedHeap* gch = GenCollectedHeap::heap();
4266   ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4267 
4268   // ---------- young gen roots --------------
4269   {
4270     work_on_young_gen_roots(&par_mri_cl);
4271     _timer.stop();
4272     log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4273   }
4274 


4316     _strong_roots_scope(strong_roots_scope) { }
4317 
4318   OopTaskQueueSet* task_queues() { return _task_queues; }
4319 
4320   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4321 
4322   ParallelTaskTerminator* terminator() { return &_term; }
4323   uint n_workers() { return _n_workers; }
4324 
4325   void work(uint worker_id);
4326 
4327  private:
4328   // ... of  dirty cards in old space
4329   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4330                                   ParMarkRefsIntoAndScanClosure* cl);
4331 
4332   // ... work stealing for the above
4333   void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
4334 };
4335 
4336 class RemarkCLDClosure : public CLDClosure {
4337   CLDToOopClosure _cm_closure;
4338  public:
4339   RemarkCLDClosure(OopClosure* oop_closure) : _cm_closure(oop_closure) {}
4340   void do_cld(ClassLoaderData* cld) {
4341     // Check if we have modified any oops in the CLD during the concurrent marking.
4342     if (cld->has_accumulated_modified_oops()) {
4343       cld->clear_accumulated_modified_oops();
4344 
4345     // We could have transferred the current modified marks to the accumulated marks,
4346       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
4347     } else if (cld->has_modified_oops()) {
4348       // Don't clear anything, this info is needed by the next young collection.
4349     } else {
4350       // No modified oops in the ClassLoaderData.
4351       return;
4352     }
4353 
4354     // The CLD has modified oops that need to be scanned.
4355     _cm_closure.do_cld(cld);
4356   }
4357 };
4358 
4359 void CMSParMarkTask::work_on_young_gen_roots(OopsInGenClosure* cl) {
4360   ParNewGeneration* young_gen = _collector->_young_gen;
4361   ContiguousSpace* eden_space = young_gen->eden();
4362   ContiguousSpace* from_space = young_gen->from();
4363   ContiguousSpace* to_space   = young_gen->to();
4364 
4365   HeapWord** eca = _collector->_eden_chunk_array;
4366   size_t     ect = _collector->_eden_chunk_index;
4367   HeapWord** sca = _collector->_survivor_chunk_array;
4368   size_t     sct = _collector->_survivor_chunk_index;
4369 
4370   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4371   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4372 
4373   do_young_space_rescan(cl, to_space, NULL, 0);
4374   do_young_space_rescan(cl, from_space, sca, sct);
4375   do_young_space_rescan(cl, eden_space, eca, ect);
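
The chunk arrays handed to do_young_space_rescan above were sampled during allocation precisely so each young space can be rescanned in parallel: the sampled addresses split [bottom, top) into slices that workers claim independently. A simplified sketch of the slicing only (scan_objects_in_range is a hypothetical helper; the real code also claims tasks so each slice is scanned exactly once):

  void rescan_chunks(ContiguousSpace* space, HeapWord** chunk_array, size_t n,
                     OopsInGenClosure* cl) {
    HeapWord* start = space->bottom();
    for (size_t i = 0; i <= n; i++) {
      // chunk_array holds n interior boundaries inside [bottom, top).
      HeapWord* end = (i < n) ? chunk_array[i] : space->top();
      scan_objects_in_range(space, MemRegion(start, end), cl); // hypothetical
      start = end;
    }
  }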


4423   // ---------- unhandled CLD scanning ----------
4424   if (worker_id == 0) { // Single threaded at the moment.
4425     _timer.reset();
4426     _timer.start();
4427 
4428     // Scan all new class loader data objects and new dependencies that were
4429     // introduced during concurrent marking.
4430     ResourceMark rm;
4431     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4432     for (int i = 0; i < array->length(); i++) {
4433       par_mrias_cl.do_cld_nv(array->at(i));
4434     }
4435 
4436     // We don't need to keep track of new CLDs anymore.
4437     ClassLoaderDataGraph::remember_new_clds(false);
4438 
4439     _timer.stop();
4440     log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4441   }
4442 
4443   // We might have added oops to ClassLoaderData::_handles during the
4444   // concurrent marking phase. These oops do not always point to newly allocated objects
4445   // that are guaranteed to be kept alive.  Hence,
4446   // we do have to revisit the _handles block during the remark phase.
4447 
4448   // ---------- dirty CLD scanning ----------
4449   if (worker_id == 0) { // Single threaded at the moment.
4450     _timer.reset();
4451     _timer.start();
4452 
4453     // Scan all CLDs that were dirtied during the concurrent marking phase.
4454     RemarkCLDClosure remark_closure(&par_mrias_cl);
4455     ClassLoaderDataGraph::cld_do(&remark_closure);
4456 
4457     _timer.stop();
4458     log_trace(gc, task)("Finished dirty CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4459   }
4460 
4461 
4462   // ---------- rescan dirty cards ------------
4463   _timer.reset();
4464   _timer.start();
4465 
4466   // Do the rescan tasks for the CMS space
4467   // (cms_space).
4468   // "worker_id" is passed to select the task_queue for "worker_id"
4469   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4470   _timer.stop();
4471   log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4472 
4473   // ---------- steal work from other threads ...
4474   // ---------- ... and drain overflow list.
4475   _timer.reset();
4476   _timer.start();
4477   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4478   _timer.stop();
4479   log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4480 }
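
do_work_steal above follows the usual drain/steal/terminate pattern: exhaust the local queue, try to steal from a randomly chosen victim, and return only once the ParallelTaskTerminator has seen every worker go idle. A minimal sketch, assuming the GenericTaskQueue-style pop_local()/steal() interfaces:

  void steal_and_drain(uint worker_id, OopTaskQueueSet* queues,
                       ParallelTaskTerminator* term, int* seed,
                       ParMarkRefsIntoAndScanClosure* cl) {
    OopTaskQueue* q = queues->queue(worker_id);
    do {
      oop obj;
      // Drain local work first, then try random-victim stealing.
      while (q->pop_local(obj) || queues->steal(worker_id, seed, obj)) {
        obj->oop_iterate(cl);   // scan the object; may push more work
      }
      // No work found: offer termination; new work re-activates the worker.
    } while (!term->offer_termination());
  }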


4965 
4966   {
4967     GCTraceTime(Trace, gc, phases) t("Visit Unhandled CLDs", _gc_timer_cm);
4968 
4969     verify_work_stacks_empty();
4970 
4971     // Scan all class loader data objects that might have been introduced
4972     // during concurrent marking.
4973     ResourceMark rm;
4974     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4975     for (int i = 0; i < array->length(); i++) {
4976       mrias_cl.do_cld_nv(array->at(i));
4977     }
4978 
4979     // We don't need to keep track of new CLDs anymore.
4980     ClassLoaderDataGraph::remember_new_clds(false);
4981 
4982     verify_work_stacks_empty();
4983   }
4984 
4985   // We might have added oops to ClassLoaderData::_handles during the
4986   // concurrent marking phase. These oops do not always point to newly
4987   // allocated objects that are guaranteed to be kept alive.  Hence,
4988   // we do have to revisit the _handles block during the remark phase.
4989   {
4990     GCTraceTime(Trace, gc, phases) t("Dirty CLD Scan", _gc_timer_cm);
4991 
4992     verify_work_stacks_empty();
4993 
4994     RemarkCLDClosure remark_closure(&mrias_cl);
4995     ClassLoaderDataGraph::cld_do(&remark_closure);
4996 
4997     verify_work_stacks_empty();
4998   }
4999 
5000   verify_work_stacks_empty();
5001   // Restore evacuated mark words, if any, used for overflow list links
5002   restore_preserved_marks_if_any();
5003 
5004   verify_overflow_empty();
5005 }
5006 
5007 ////////////////////////////////////////////////////////
5008 // Parallel Reference Processing Task Proxy Class
5009 ////////////////////////////////////////////////////////
5010 class AbstractGangTaskWOopQueues : public AbstractGangTask {
5011   OopTaskQueueSet*       _queues;
5012   ParallelTaskTerminator _terminator;
5013  public:
5014   AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
5015     AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
5016   ParallelTaskTerminator* terminator() { return &_terminator; }
5017   OopTaskQueueSet* queues() { return _queues; }
5018 };
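
AbstractGangTaskWOopQueues bundles the queue set with a terminator sized to the worker count, so each parallel proxy task only has to supply work(). A hypothetical usage sketch (the task name and body are illustrative):

  class SampleRefProcProxyTask : public AbstractGangTaskWOopQueues {
   public:
    SampleRefProcProxyTask(OopTaskQueueSet* queues, uint n_workers) :
        AbstractGangTaskWOopQueues("SampleRefProcProxy", queues, n_workers) {}
    virtual void work(uint worker_id) {
      // Process this worker's share via queues()->queue(worker_id), then
      // spin on terminator()->offer_termination() until all workers finish.
    }
  };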

