
src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp


2428                            &cld_closure);
2429   }
2430 
2431   // Now mark from the roots
2432   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2433     verification_mark_bm(), markBitMap(), verification_mark_stack());
2434   assert(_restart_addr == NULL, "Expected pre-condition");
2435   verification_mark_bm()->iterate(&markFromRootsClosure);
2436   while (_restart_addr != NULL) {
2437     // Deal with stack overflow by restarting at the indicated
2438     // address.
2439     HeapWord* ra = _restart_addr;
2440     markFromRootsClosure.reset(ra);
2441     _restart_addr = NULL;
2442     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2443   }
2444   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2445   verify_work_stacks_empty();
2446 
2447   VerifyCLDOopsCLDClosure verify_cld_oops(verification_mark_bm());
2448   ClassLoaderDataGraph::cld_do(&verify_cld_oops);
2449 
2450   // Marking completed -- now verify that each bit marked in
2451   // verification_mark_bm() is also marked in markBitMap(); flag all
2452   // errors by printing corresponding objects.
2453   VerifyMarkedClosure vcl(markBitMap());
2454   verification_mark_bm()->iterate(&vcl);
2455   assert(!vcl.failed(), "Else verification above should not have succeeded");
2456 }
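
Editor's note: the loop at lines 2436-2443 handles verification-stack overflow by recording a restart address and re-running the bitmap iteration from there. Below is a minimal, self-contained sketch of that restart-on-overflow pattern; the types (Visitor, iterate_bits) are hypothetical stand-ins added for illustration, not code from this file or from HotSpot.

// Editor's illustrative sketch (hypothetical types, not HotSpot code): iterate
// over set bits, and whenever the visitor's bounded stack overflows, remember
// the index it stopped at and resume from there after draining.
#include <cstddef>
#include <cstdint>
#include <vector>

struct Visitor {
  size_t restart_at = SIZE_MAX;          // SIZE_MAX means no restart is pending
  std::vector<size_t> stack;             // bounded work stack (capacity 4 here)
  void visit(size_t index) {
    if (stack.size() >= 4) {             // overflow: remember where to resume
      if (restart_at == SIZE_MAX) {
        restart_at = index;
      }
      return;
    }
    stack.push_back(index);
  }
  void drain() { stack.clear(); }        // stand-in for processing the stack
};

static void iterate_bits(const std::vector<bool>& bits, Visitor& v, size_t from) {
  for (size_t i = from; i < bits.size(); i++) {
    if (bits[i]) {
      v.visit(i);
    }
  }
}

void mark_with_restart(const std::vector<bool>& bits, Visitor& v) {
  iterate_bits(bits, v, 0);
  while (v.restart_at != SIZE_MAX) {     // same shape as the _restart_addr loop
    size_t ra = v.restart_at;
    v.restart_at = SIZE_MAX;
    v.drain();                           // make room, then resume at 'ra'
    iterate_bits(bits, v, ra);
  }
}
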
2457 
2458 void ConcurrentMarkSweepGeneration::save_marks() {
2459   // delegate to CMS space
2460   cmsSpace()->save_marks();
2461 }
2462 
2463 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2464   return cmsSpace()->no_allocs_since_save_marks();
2465 }
2466 
2467 void
2468 ConcurrentMarkSweepGeneration::oop_iterate(OopIterateClosure* cl) {


4067     if (cld->has_accumulated_modified_oops()) {
4068       cld->clear_accumulated_modified_oops();
4069 
4070       _cm_closure->do_cld(cld);
4071     }
4072   }
4073 };
4074 
4075 // The freelist lock is needed to prevent asserts; is it really needed?
4076 void CMSCollector::preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4077   // Needed to walk CLDG
4078   MutexLocker ml(ClassLoaderDataGraph_lock);
4079 
4080   cl->set_freelistLock(freelistLock);
4081 
4082   CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4083 
4084   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4085   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4086   PrecleanCLDClosure preclean_closure(cl);
4087   ClassLoaderDataGraph::cld_do(&preclean_closure);
4088 
4089   verify_work_stacks_empty();
4090   verify_overflow_empty();
4091 }
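
Editor's note: preclean_cld() above walks the class loader data graph and applies PrecleanCLDClosure (its do_cld body is in the hunk starting at line 4067), which only rescans loaders whose accumulated-modified-oops flag is set and clears the flag as it goes. A minimal sketch of that test-and-clear precleaning shape follows; the types (Loader, OopScanner, PrecleanVisitor) are hypothetical stand-ins, not the HotSpot classes.

// Editor's illustrative sketch (hypothetical types): visit only loaders whose
// "modified" flag was set concurrently, and clear the flag before rescanning so
// later mutations re-dirty it and are caught by the next pass or by remark.
#include <vector>

struct Loader {
  bool accumulated_modified_oops = false;
  // ... the loader's oop handles would live here ...
};

struct OopScanner {                       // stand-in for MarkRefsIntoAndScanClosure
  void scan(Loader& l) { (void)l; /* trace the loader's handles */ }
};

struct PrecleanVisitor {
  OopScanner* scanner;
  void do_loader(Loader& l) {
    if (l.accumulated_modified_oops) {    // test ...
      l.accumulated_modified_oops = false; // ... clear ...
      scanner->scan(l);                   // ... then rescan this loader
    }
  }
};

void preclean_all(std::vector<Loader>& graph, OopScanner* scanner) {
  PrecleanVisitor v{scanner};
  for (Loader& l : graph) {               // stands in for the CLD graph walk
    v.do_loader(l);
  }
}
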
4092 
4093 void CMSCollector::checkpointRootsFinal() {
4094   assert(_collectorState == FinalMarking, "incorrect state transition?");
4095   check_correct_thread_executing();
4096   // world is stopped at this checkpoint
4097   assert(SafepointSynchronize::is_at_safepoint(),
4098          "world should be stopped");
4099   TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
4100 
4101   verify_work_stacks_empty();
4102   verify_overflow_empty();
4103 
4104   log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4105                 _young_gen->used() / K, _young_gen->capacity() / K);
4106   {
4107     if (CMSScavengeBeforeRemark) {


4431 
4432     // We don't need to keep track of new CLDs anymore.
4433     ClassLoaderDataGraph::remember_new_clds(false);
4434 
4435     _timer.stop();
4436     log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4437   }
4438 
4439   // We might have added oops to ClassLoaderData::_handles during the
4440   // concurrent marking phase. These oops do not always point to newly allocated objects
4441   // that are guaranteed to be kept alive.  Hence,
4442   // we do have to revisit the _handles block during the remark phase.
4443 
4444   // ---------- dirty CLD scanning ----------
4445   if (worker_id == 0) { // Single threaded at the moment.
4446     _timer.reset();
4447     _timer.start();
4448 
4449     // Scan all classes that were dirtied during the concurrent marking phase.
4450     RemarkCLDClosure remark_closure(&par_mrias_cl);
4451     ClassLoaderDataGraph::cld_do(&remark_closure);
4452 
4453     _timer.stop();
4454     log_trace(gc, task)("Finished dirty CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4455   }
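
Editor's note: the comment before this block (lines 4439-4442) gives the reason for the rescan: a reference stored into a loader's handle block while concurrent marking was running may point at an object the marker never reached. The self-contained sketch below walks through that timeline with made-up names; it is an editor-added illustration, not HotSpot code.

// Editor's illustrative sketch (hypothetical names): why the handle block must
// be revisited at remark. Without step 3, 'existing' would stay unmarked even
// though it became reachable through the loader during concurrent marking.
#include <unordered_set>
#include <vector>

struct Obj {};

int main() {
  std::unordered_set<Obj*> marked;
  std::vector<Obj*> handles;              // stands in for ClassLoaderData::_handles
  bool cld_modified = false;

  Obj existing;                           // allocated before marking started
  // 1) Concurrent marking scans the (still empty) handle block and moves on.
  // 2) A mutator then publishes 'existing' through the loader's handles:
  handles.push_back(&existing);
  cld_modified = true;
  // 'existing' is now reachable but unmarked; nothing guarantees it was marked
  // through some other path, so it would be swept if collection ended here.

  // 3) Final remark (world stopped): rescan loaders dirtied during marking.
  if (cld_modified) {
    for (Obj* o : handles) {
      marked.insert(o);
    }
    cld_modified = false;
  }
  return marked.count(&existing) == 1 ? 0 : 1;   // exits 0: the object survives
}
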
4456 
4457 
4458   // ---------- rescan dirty cards ------------
4459   _timer.reset();
4460   _timer.start();
4461 
4462   // Do the rescan tasks for the CMS space
4463   // (cms_space).
4464   // "worker_id" is passed to select the task_queue for "worker_id"
4465   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4466   _timer.stop();
4467   log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4468 
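
Editor's note: the dirty card rescan above re-examines heap ranges that mutators wrote to while marking ran concurrently. The sketch below shows the general shape of such a rescan over a card table; the card size, type names, and scan callback are assumptions added for illustration, not the CMS implementation.

// Editor's illustrative sketch (hypothetical, simplified): the heap is covered
// by fixed-size "cards", one status byte per card. Rescanning walks the card
// table, clears each dirty entry, and re-scans the heap range it covers.
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t kCardSize = 512;         // bytes of heap covered per card (assumed)
enum : uint8_t { kClean = 0, kDirty = 1 };

struct CardTable {
  std::vector<uint8_t> cards;             // one entry per card
};

// 'scan_range' stands in for applying a closure such as
// MarkRefsIntoAndScanClosure to the objects intersecting the card's range.
void rescan_dirty_cards(CardTable& ct, void (*scan_range)(size_t start)) {
  for (size_t i = 0; i < ct.cards.size(); i++) {
    if (ct.cards[i] == kDirty) {
      ct.cards[i] = kClean;               // clear first, then rescan the range
      scan_range(i * kCardSize);
    }
  }
}
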
4469   // ---------- steal work from other threads ...
4470   // ---------- ... and drain overflow list.
4471   _timer.reset();


4968     for (int i = 0; i < array->length(); i++) {
4969       Devirtualizer::do_cld(&mrias_cl, array->at(i));
4970     }
4971 
4972     // We don't need to keep track of new CLDs anymore.
4973     ClassLoaderDataGraph::remember_new_clds(false);
4974 
4975     verify_work_stacks_empty();
4976   }
4977 
4978   // We might have added oops to ClassLoaderData::_handles during the
4979   // concurrent marking phase. These oops do not point to newly allocated objects
4980   // that are guaranteed to be kept alive.  Hence,
4981   // we do have to revisit the _handles block during the remark phase.
4982   {
4983     GCTraceTime(Trace, gc, phases) t("Dirty CLD Scan", _gc_timer_cm);
4984 
4985     verify_work_stacks_empty();
4986 
4987     RemarkCLDClosure remark_closure(&mrias_cl);
4988     ClassLoaderDataGraph::cld_do(&remark_closure);
4989 
4990     verify_work_stacks_empty();
4991   }
4992 
4993   verify_work_stacks_empty();
4994   // Restore evacuated mark words, if any, used for overflow list links
4995   restore_preserved_marks_if_any();
4996 
4997   verify_overflow_empty();
4998 }
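
Editor's note: restore_preserved_marks_if_any() above undoes a space-saving trick: while objects sit on the overflow list, their header ("mark") words are borrowed as list links, with the displaced originals saved off to the side. The sketch below shows that preserve/relink/restore shape with made-up types; it is illustration only and does not reflect the actual HotSpot header encoding.

// Editor's illustrative sketch (hypothetical types): temporarily reuse a header
// word as an intrusive "next" link, saving the displaced value in a side table
// and restoring it once the list has been drained.
#include <cstdint>
#include <utility>
#include <vector>

struct Node {
  uintptr_t header = 0;                   // normally holds object metadata
};

struct OverflowList {
  Node* head = nullptr;
  std::vector<std::pair<Node*, uintptr_t>> preserved;  // (node, original header)

  void push(Node* n) {
    preserved.emplace_back(n, n->header); // save the displaced word
    n->header = reinterpret_cast<uintptr_t>(head);  // reuse it as the link
    head = n;
  }

  void restore_preserved() {              // after the list has been processed
    for (std::pair<Node*, uintptr_t>& p : preserved) {
      p.first->header = p.second;
    }
    preserved.clear();
    head = nullptr;
  }
};
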
4999 
5000 ////////////////////////////////////////////////////////
5001 // Parallel Reference Processing Task Proxy Class
5002 ////////////////////////////////////////////////////////
5003 class AbstractGangTaskWOopQueues : public AbstractGangTask {
5004   OopTaskQueueSet*       _queues;
5005   ParallelTaskTerminator _terminator;
5006  public:
5007   AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
5008     AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}




2428                            &cld_closure);
2429   }
2430 
2431   // Now mark from the roots
2432   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2433     verification_mark_bm(), markBitMap(), verification_mark_stack());
2434   assert(_restart_addr == NULL, "Expected pre-condition");
2435   verification_mark_bm()->iterate(&markFromRootsClosure);
2436   while (_restart_addr != NULL) {
2437     // Deal with stack overflow by restarting at the indicated
2438     // address.
2439     HeapWord* ra = _restart_addr;
2440     markFromRootsClosure.reset(ra);
2441     _restart_addr = NULL;
2442     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2443   }
2444   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2445   verify_work_stacks_empty();
2446 
2447   VerifyCLDOopsCLDClosure verify_cld_oops(verification_mark_bm());
2448   ClassLoaderDataGraph::cld_oops_do(&verify_cld_oops);
2449 
2450   // Marking completed -- now verify that each bit marked in
2451   // verification_mark_bm() is also marked in markBitMap(); flag all
2452   // errors by printing corresponding objects.
2453   VerifyMarkedClosure vcl(markBitMap());
2454   verification_mark_bm()->iterate(&vcl);
2455   assert(!vcl.failed(), "Else verification above should not have succeeded");
2456 }
2457 
2458 void ConcurrentMarkSweepGeneration::save_marks() {
2459   // delegate to CMS space
2460   cmsSpace()->save_marks();
2461 }
2462 
2463 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2464   return cmsSpace()->no_allocs_since_save_marks();
2465 }
2466 
2467 void
2468 ConcurrentMarkSweepGeneration::oop_iterate(OopIterateClosure* cl) {


4067     if (cld->has_accumulated_modified_oops()) {
4068       cld->clear_accumulated_modified_oops();
4069 
4070       _cm_closure->do_cld(cld);
4071     }
4072   }
4073 };
4074 
4075 // The freelist lock is needed to prevent asserts; is it really needed?
4076 void CMSCollector::preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4077   // Needed to walk CLDG
4078   MutexLocker ml(ClassLoaderDataGraph_lock);
4079 
4080   cl->set_freelistLock(freelistLock);
4081 
4082   CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4083 
4084   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4085   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4086   PrecleanCLDClosure preclean_closure(cl);
4087   ClassLoaderDataGraph::cld_oops_do(&preclean_closure);
4088 
4089   verify_work_stacks_empty();
4090   verify_overflow_empty();
4091 }
4092 
4093 void CMSCollector::checkpointRootsFinal() {
4094   assert(_collectorState == FinalMarking, "incorrect state transition?");
4095   check_correct_thread_executing();
4096   // world is stopped at this checkpoint
4097   assert(SafepointSynchronize::is_at_safepoint(),
4098          "world should be stopped");
4099   TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
4100 
4101   verify_work_stacks_empty();
4102   verify_overflow_empty();
4103 
4104   log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4105                 _young_gen->used() / K, _young_gen->capacity() / K);
4106   {
4107     if (CMSScavengeBeforeRemark) {


4431 
4432     // We don't need to keep track of new CLDs anymore.
4433     ClassLoaderDataGraph::remember_new_clds(false);
4434 
4435     _timer.stop();
4436     log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4437   }
4438 
4439   // We might have added oops to ClassLoaderData::_handles during the
4440   // concurrent marking phase. These oops do not always point to newly allocated objects
4441   // that are guaranteed to be kept alive.  Hence,
4442   // we do have to revisit the _handles block during the remark phase.
4443 
4444   // ---------- dirty CLD scanning ----------
4445   if (worker_id == 0) { // Single threaded at the moment.
4446     _timer.reset();
4447     _timer.start();
4448 
4449     // Scan all classes that were dirtied during the concurrent marking phase.
4450     RemarkCLDClosure remark_closure(&par_mrias_cl);
4451     ClassLoaderDataGraph::cld_oops_do(&remark_closure);
4452 
4453     _timer.stop();
4454     log_trace(gc, task)("Finished dirty CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4455   }
4456 
4457 
4458   // ---------- rescan dirty cards ------------
4459   _timer.reset();
4460   _timer.start();
4461 
4462   // Do the rescan tasks for the CMS space
4463   // (cms_space).
4464   // "worker_id" is passed to select the task_queue for "worker_id"
4465   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4466   _timer.stop();
4467   log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4468 
4469   // ---------- steal work from other threads ...
4470   // ---------- ... and drain overflow list.
4471   _timer.reset();


4968     for (int i = 0; i < array->length(); i++) {
4969       Devirtualizer::do_cld(&mrias_cl, array->at(i));
4970     }
4971 
4972     // We don't need to keep track of new CLDs anymore.
4973     ClassLoaderDataGraph::remember_new_clds(false);
4974 
4975     verify_work_stacks_empty();
4976   }
4977 
4978   // We might have added oops to ClassLoaderData::_handles during the
4979   // concurrent marking phase. These oops do not point to newly allocated objects
4980   // that are guaranteed to be kept alive.  Hence,
4981   // we do have to revisit the _handles block during the remark phase.
4982   {
4983     GCTraceTime(Trace, gc, phases) t("Dirty CLD Scan", _gc_timer_cm);
4984 
4985     verify_work_stacks_empty();
4986 
4987     RemarkCLDClosure remark_closure(&mrias_cl);
4988     ClassLoaderDataGraph::cld_oops_do(&remark_closure);
4989 
4990     verify_work_stacks_empty();
4991   }
4992 
4993   verify_work_stacks_empty();
4994   // Restore evacuated mark words, if any, used for overflow list links
4995   restore_preserved_marks_if_any();
4996 
4997   verify_overflow_empty();
4998 }
4999 
5000 ////////////////////////////////////////////////////////
5001 // Parallel Reference Processing Task Proxy Class
5002 ////////////////////////////////////////////////////////
5003 class AbstractGangTaskWOopQueues : public AbstractGangTask {
5004   OopTaskQueueSet*       _queues;
5005   ParallelTaskTerminator _terminator;
5006  public:
5007   AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
5008     AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
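
Editor's note: the class fragment above pairs a set of per-worker oop task queues with a ParallelTaskTerminator. Gang tasks built this way typically have each worker drain its own queue, steal from peers when it runs dry, and stop only when the termination protocol agrees that all queues are empty. Below is a single-threaded, editor-added sketch of that drain/steal/terminate shape with hypothetical types; it is not the HotSpot task queue API.

// Editor's illustrative sketch (hypothetical types, single-threaded demo): the
// drain / steal / terminate loop that queue-based gang tasks generally follow.
#include <cstddef>
#include <deque>
#include <vector>

struct Task { int payload = 0; };

struct WorkerQueues {
  std::vector<std::deque<Task>> q;        // one deque per worker
  bool steal(size_t thief, Task& out) {   // try to take work from another queue
    for (size_t victim = 0; victim < q.size(); victim++) {
      if (victim != thief && !q[victim].empty()) {
        out = q[victim].back();
        q[victim].pop_back();
        return true;
      }
    }
    return false;
  }
};

void worker_loop(WorkerQueues& qs, size_t id, void (*process)(Task&)) {
  Task t;
  for (;;) {
    while (!qs.q[id].empty()) {           // 1) drain the local queue
      t = qs.q[id].front();
      qs.q[id].pop_front();
      process(t);
    }
    if (qs.steal(id, t)) {                // 2) steal from a peer and keep going
      process(t);
      continue;
    }
    break;                                // 3) nothing left anywhere: terminate
  }
}
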

