489 _survivor_plab_array(NULL), // -- ditto --
490 _survivor_chunk_array(NULL), // -- ditto --
491 _survivor_chunk_capacity(0), // -- ditto --
492 _survivor_chunk_index(0), // -- ditto --
493 _ser_pmc_preclean_ovflw(0),
494 _ser_kac_preclean_ovflw(0),
495 _ser_pmc_remark_ovflw(0),
496 _par_pmc_remark_ovflw(0),
497 _ser_kac_ovflw(0),
498 _par_kac_ovflw(0),
499 #ifndef PRODUCT
500 _num_par_pushes(0),
501 #endif
502 _collection_count_start(0),
503 _verifying(false),
504 _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
505 _completed_initialization(false),
506 _collector_policy(cp),
507 _should_unload_classes(CMSClassUnloadingEnabled),
508 _concurrent_cycles_since_last_unload(0),
509 _roots_scanning_options(SharedHeap::SO_None),
510 _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
511 _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
512 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
513 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
514 _cms_start_registered(false)
515 {
516 if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
517 ExplicitGCInvokesConcurrent = true;
518 }
519 // Now expand the span and allocate the collection support structures
520 // (MUT, marking bit map etc.) to cover both generations subject to
521 // collection.
522
523 // For use by dirty card to oop closures.
524 _cmsGen->cmsSpace()->set_collector(this);
525
526 // Allocate MUT and marking bit map
527 {
528 MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
529 if (!_markBitMap.allocate(_span)) {
2479 }
2480 if (!silent) gclog_or_tty->print(" done] ");
2481 return true;
2482 }
2483
2484 void CMSCollector::verify_after_remark_work_1() {
2485 ResourceMark rm;
2486 HandleMark hm;
2487 GenCollectedHeap* gch = GenCollectedHeap::heap();
2488
2489 // Get a clear set of claim bits for the roots processing to work with.
2490 ClassLoaderDataGraph::clear_claimed_marks();
2491
2492 // Mark from roots one level into CMS
2493 MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2494 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2495
2496 gch->gen_process_roots(_cmsGen->level(),
2497 true, // younger gens are roots
2498 true, // activate StrongRootsScope
2499 SharedHeap::ScanningOption(roots_scanning_options()),
2500 should_unload_classes(),
2501 &notOlder,
2502 NULL,
2503 NULL); // SSS: Provide correct closure
2504
2505 // Now mark from the roots
2506 MarkFromRootsClosure markFromRootsClosure(this, _span,
2507 verification_mark_bm(), verification_mark_stack(),
2508 false /* don't yield */, true /* verifying */);
2509 assert(_restart_addr == NULL, "Expected pre-condition");
2510 verification_mark_bm()->iterate(&markFromRootsClosure);
2511 while (_restart_addr != NULL) {
2512 // Deal with stack overflow: by restarting at the indicated
2513 // address.
2514 HeapWord* ra = _restart_addr;
2515 markFromRootsClosure.reset(ra);
2516 _restart_addr = NULL;
2517 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2518 }
2519 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2547 };
2548
2549 void CMSCollector::verify_after_remark_work_2() {
2550 ResourceMark rm;
2551 HandleMark hm;
2552 GenCollectedHeap* gch = GenCollectedHeap::heap();
2553
2554 // Get a clear set of claim bits for the roots processing to work with.
2555 ClassLoaderDataGraph::clear_claimed_marks();
2556
2557 // Mark from roots one level into CMS
2558 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2559 markBitMap());
2560 CLDToOopClosure cld_closure(&notOlder, true);
2561
2562 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2563
2564 gch->gen_process_roots(_cmsGen->level(),
2565 true, // younger gens are roots
2566 true, // activate StrongRootsScope
2567 SharedHeap::ScanningOption(roots_scanning_options()),
2568 should_unload_classes(),
2569 &notOlder,
2570 NULL,
2571 &cld_closure);
2572
2573 // Now mark from the roots
2574 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2575 verification_mark_bm(), markBitMap(), verification_mark_stack());
2576 assert(_restart_addr == NULL, "Expected pre-condition");
2577 verification_mark_bm()->iterate(&markFromRootsClosure);
2578 while (_restart_addr != NULL) {
2579 // Deal with stack overflow: by restarting at the indicated
2580 // address.
2581 HeapWord* ra = _restart_addr;
2582 markFromRootsClosure.reset(ra);
2583 _restart_addr = NULL;
2584 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2585 }
2586 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2587 verify_work_stacks_empty();
2731 // Condition 1 above
2732 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2733 _should_unload_classes = true;
2734 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
2735 // Disjuncts 2.b.(i,ii,iii) above
2736 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2737 CMSClassUnloadingMaxInterval)
2738 || _cmsGen->is_too_full();
2739 }
2740 }
2741
2742 bool ConcurrentMarkSweepGeneration::is_too_full() const {
2743 bool res = should_concurrent_collect();
2744 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2745 return res;
2746 }
2747
2748 void CMSCollector::setup_cms_unloading_and_verification_state() {
2749 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2750 || VerifyBeforeExit;
2751 const int rso = SharedHeap::SO_AllCodeCache;
2752
2753 // We set the proper root for this CMS cycle here.
2754 if (should_unload_classes()) { // Should unload classes this cycle
2755 remove_root_scanning_option(rso); // Shrink the root set appropriately
2756 set_verifying(should_verify); // Set verification state for this cycle
2757 return; // Nothing else needs to be done at this time
2758 }
2759
2760 // Not unloading classes this cycle
2761 assert(!should_unload_classes(), "Inconsistency!");
2762
2763 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2764 // Include symbols, strings and code cache elements to prevent their resurrection.
2765 add_root_scanning_option(rso);
2766 set_verifying(true);
2767 } else if (verifying() && !should_verify) {
2768 // We were verifying, but some verification flags got disabled.
2769 set_verifying(false);
2770 // Exclude symbols, strings and code cache elements from root scanning to
2771 // reduce IM and RM pauses.
3070 assert(workers != NULL, "Need parallel worker threads.");
3071 int n_workers = workers->active_workers();
3072 CMSParInitialMarkTask tsk(this, n_workers);
3073 gch->set_par_threads(n_workers);
3074 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3075 if (n_workers > 1) {
3076 GenCollectedHeap::StrongRootsScope srs(gch);
3077 workers->run_task(&tsk);
3078 } else {
3079 GenCollectedHeap::StrongRootsScope srs(gch);
3080 tsk.work(0);
3081 }
3082 gch->set_par_threads(0);
3083 } else {
3084 // The serial version.
3085 CLDToOopClosure cld_closure(&notOlder, true);
3086 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3087 gch->gen_process_roots(_cmsGen->level(),
3088 true, // younger gens are roots
3089 true, // activate StrongRootsScope
3090 SharedHeap::ScanningOption(roots_scanning_options()),
3091 should_unload_classes(),
3092 &notOlder,
3093 NULL,
3094 &cld_closure);
3095 }
3096 }
3097
3098 // Clear mod-union table; it will be dirtied in the prologue of
3099 // CMS generation per each younger generation collection.
3100
3101 assert(_modUnionTable.isAllClear(),
3102 "Was cleared in most recent final checkpoint phase"
3103 " or no bits are set in the gc_prologue before the start of the next "
3104 "subsequent marking phase.");
3105
3106 assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3107
3108 // Save the end of the used_region of the constituent generations
3109 // to be used to limit the extent of sweep in each generation.
3110 save_sweep_limits();
4504 // ---------- young gen roots --------------
4505 {
4506 work_on_young_gen_roots(worker_id, &par_mri_cl);
4507 _timer.stop();
4508 if (PrintCMSStatistics != 0) {
4509 gclog_or_tty->print_cr(
4510 "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
4511 worker_id, _timer.seconds());
4512 }
4513 }
4514
4515 // ---------- remaining roots --------------
4516 _timer.reset();
4517 _timer.start();
4518
4519 CLDToOopClosure cld_closure(&par_mri_cl, true);
4520
4521 gch->gen_process_roots(_collector->_cmsGen->level(),
4522 false, // yg was scanned above
4523 false, // this is parallel code
4524 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4525 _collector->should_unload_classes(),
4526 &par_mri_cl,
4527 NULL,
4528 &cld_closure);
4529 assert(_collector->should_unload_classes()
4530 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
4531 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4532 _timer.stop();
4533 if (PrintCMSStatistics != 0) {
4534 gclog_or_tty->print_cr(
4535 "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
4536 worker_id, _timer.seconds());
4537 }
4538 }
4539
4540 // Parallel remark task
4541 class CMSParRemarkTask: public CMSParMarkTask {
4542 CompactibleFreeListSpace* _cms_space;
4543
4544 // The per-thread work queues, available here for stealing.
4545 OopTaskQueueSet* _task_queues;
4546 ParallelTaskTerminator _term;
4547
4548 public:
4549 // A value of 0 passed to n_workers will cause the number of
4550 // workers to be taken from the active workers in the work gang.
4640 // coarsely partitioned and may, on that account, constitute
4641 // the critical path; thus, it's best to start off that
4642 // work first.
4643 // ---------- young gen roots --------------
4644 {
4645 work_on_young_gen_roots(worker_id, &par_mrias_cl);
4646 _timer.stop();
4647 if (PrintCMSStatistics != 0) {
4648 gclog_or_tty->print_cr(
4649 "Finished young gen rescan work in %dth thread: %3.3f sec",
4650 worker_id, _timer.seconds());
4651 }
4652 }
4653
4654 // ---------- remaining roots --------------
4655 _timer.reset();
4656 _timer.start();
4657 gch->gen_process_roots(_collector->_cmsGen->level(),
4658 false, // yg was scanned above
4659 false, // this is parallel code
4660 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4661 _collector->should_unload_classes(),
4662 &par_mrias_cl,
4663 NULL,
4664 NULL); // The dirty klasses will be handled below
4665
4666 assert(_collector->should_unload_classes()
4667 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
4668 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4669 _timer.stop();
4670 if (PrintCMSStatistics != 0) {
4671 gclog_or_tty->print_cr(
4672 "Finished remaining root rescan work in %dth thread: %3.3f sec",
4673 worker_id, _timer.seconds());
4674 }
4675
4676 // ---------- unhandled CLD scanning ----------
4677 if (worker_id == 0) { // Single threaded at the moment.
4678 _timer.reset();
4679 _timer.start();
4680
4681 // Scan all new class loader data objects and new dependencies that were
4682 // introduced during concurrent marking.
4683 ResourceMark rm;
4684 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4685 for (int i = 0; i < array->length(); i++) {
4686 par_mrias_cl.do_class_loader_data(array->at(i));
4687 }
5231 markFromDirtyCardsClosure.num_dirty_cards());
5232 }
5233 }
5234 }
5235 if (VerifyDuringGC &&
5236 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5237 HandleMark hm; // Discard invalid handles created during verification
5238 Universe::verify();
5239 }
5240 {
5241 GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5242
5243 verify_work_stacks_empty();
5244
5245 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5246 GenCollectedHeap::StrongRootsScope srs(gch);
5247
5248 gch->gen_process_roots(_cmsGen->level(),
5249 true, // younger gens as roots
5250 false, // use the local StrongRootsScope
5251 SharedHeap::ScanningOption(roots_scanning_options()),
5252 should_unload_classes(),
5253 &mrias_cl,
5254 NULL,
5255 NULL); // The dirty klasses will be handled below
5256
5257 assert(should_unload_classes()
5258 || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5259 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5260 }
5261
5262 {
5263 GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5264
5265 verify_work_stacks_empty();
5266
5267 // Scan all class loader data objects that might have been introduced
5268 // during concurrent marking.
5269 ResourceMark rm;
5270 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5271 for (int i = 0; i < array->length(); i++) {
5272 mrias_cl.do_class_loader_data(array->at(i));
5273 }
5274
5275 // We don't need to keep track of new CLDs anymore.
5276 ClassLoaderDataGraph::remember_new_clds(false);
5277
5278 verify_work_stacks_empty();
|
489 _survivor_plab_array(NULL), // -- ditto --
490 _survivor_chunk_array(NULL), // -- ditto --
491 _survivor_chunk_capacity(0), // -- ditto --
492 _survivor_chunk_index(0), // -- ditto --
493 _ser_pmc_preclean_ovflw(0),
494 _ser_kac_preclean_ovflw(0),
495 _ser_pmc_remark_ovflw(0),
496 _par_pmc_remark_ovflw(0),
497 _ser_kac_ovflw(0),
498 _par_kac_ovflw(0),
499 #ifndef PRODUCT
500 _num_par_pushes(0),
501 #endif
502 _collection_count_start(0),
503 _verifying(false),
504 _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
505 _completed_initialization(false),
506 _collector_policy(cp),
507 _should_unload_classes(CMSClassUnloadingEnabled),
508 _concurrent_cycles_since_last_unload(0),
509 _roots_scanning_options(GenCollectedHeap::SO_None),
510 _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
511 _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
512 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
513 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
514 _cms_start_registered(false)
515 {
516 if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
517 ExplicitGCInvokesConcurrent = true;
518 }
519 // Now expand the span and allocate the collection support structures
520 // (MUT, marking bit map etc.) to cover both generations subject to
521 // collection.
522
523 // For use by dirty card to oop closures.
524 _cmsGen->cmsSpace()->set_collector(this);
525
526 // Allocate MUT and marking bit map
527 {
528 MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
529 if (!_markBitMap.allocate(_span)) {
2479 }
2480 if (!silent) gclog_or_tty->print(" done] ");
2481 return true;
2482 }
2483
2484 void CMSCollector::verify_after_remark_work_1() {
2485 ResourceMark rm;
2486 HandleMark hm;
2487 GenCollectedHeap* gch = GenCollectedHeap::heap();
2488
2489 // Get a clear set of claim bits for the roots processing to work with.
2490 ClassLoaderDataGraph::clear_claimed_marks();
2491
2492 // Mark from roots one level into CMS
2493 MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2494 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2495
2496 gch->gen_process_roots(_cmsGen->level(),
2497 true, // younger gens are roots
2498 true, // activate StrongRootsScope
2499 GenCollectedHeap::ScanningOption(roots_scanning_options()),
2500 should_unload_classes(),
2501 &notOlder,
2502 NULL,
2503 NULL); // SSS: Provide correct closure
2504
2505 // Now mark from the roots
2506 MarkFromRootsClosure markFromRootsClosure(this, _span,
2507 verification_mark_bm(), verification_mark_stack(),
2508 false /* don't yield */, true /* verifying */);
2509 assert(_restart_addr == NULL, "Expected pre-condition");
2510 verification_mark_bm()->iterate(&markFromRootsClosure);
2511 while (_restart_addr != NULL) {
2512 // Deal with stack overflow: by restarting at the indicated
2513 // address.
2514 HeapWord* ra = _restart_addr;
2515 markFromRootsClosure.reset(ra);
2516 _restart_addr = NULL;
2517 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2518 }
2519 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2547 };
2548
2549 void CMSCollector::verify_after_remark_work_2() {
2550 ResourceMark rm;
2551 HandleMark hm;
2552 GenCollectedHeap* gch = GenCollectedHeap::heap();
2553
2554 // Get a clear set of claim bits for the roots processing to work with.
2555 ClassLoaderDataGraph::clear_claimed_marks();
2556
2557 // Mark from roots one level into CMS
2558 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2559 markBitMap());
2560 CLDToOopClosure cld_closure(&notOlder, true);
2561
2562 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2563
2564 gch->gen_process_roots(_cmsGen->level(),
2565 true, // younger gens are roots
2566 true, // activate StrongRootsScope
2567 GenCollectedHeap::ScanningOption(roots_scanning_options()),
2568 should_unload_classes(),
2569 &notOlder,
2570 NULL,
2571 &cld_closure);
2572
2573 // Now mark from the roots
2574 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2575 verification_mark_bm(), markBitMap(), verification_mark_stack());
2576 assert(_restart_addr == NULL, "Expected pre-condition");
2577 verification_mark_bm()->iterate(&markFromRootsClosure);
2578 while (_restart_addr != NULL) {
2579 // Deal with stack overflow: by restarting at the indicated
2580 // address.
2581 HeapWord* ra = _restart_addr;
2582 markFromRootsClosure.reset(ra);
2583 _restart_addr = NULL;
2584 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2585 }
2586 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2587 verify_work_stacks_empty();
2731 // Condition 1 above
2732 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2733 _should_unload_classes = true;
2734 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
2735 // Disjuncts 2.b.(i,ii,iii) above
2736 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2737 CMSClassUnloadingMaxInterval)
2738 || _cmsGen->is_too_full();
2739 }
2740 }
2741
2742 bool ConcurrentMarkSweepGeneration::is_too_full() const {
2743 bool res = should_concurrent_collect();
2744 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2745 return res;
2746 }
2747
2748 void CMSCollector::setup_cms_unloading_and_verification_state() {
2749 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2750 || VerifyBeforeExit;
2751 const int rso = GenCollectedHeap::SO_AllCodeCache;
2752
2753 // We set the proper root for this CMS cycle here.
2754 if (should_unload_classes()) { // Should unload classes this cycle
2755 remove_root_scanning_option(rso); // Shrink the root set appropriately
2756 set_verifying(should_verify); // Set verification state for this cycle
2757 return; // Nothing else needs to be done at this time
2758 }
2759
2760 // Not unloading classes this cycle
2761 assert(!should_unload_classes(), "Inconsistency!");
2762
2763 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2764 // Include symbols, strings and code cache elements to prevent their resurrection.
2765 add_root_scanning_option(rso);
2766 set_verifying(true);
2767 } else if (verifying() && !should_verify) {
2768 // We were verifying, but some verification flags got disabled.
2769 set_verifying(false);
2770 // Exclude symbols, strings and code cache elements from root scanning to
2771 // reduce IM and RM pauses.
3070 assert(workers != NULL, "Need parallel worker threads.");
3071 int n_workers = workers->active_workers();
3072 CMSParInitialMarkTask tsk(this, n_workers);
3073 gch->set_par_threads(n_workers);
3074 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3075 if (n_workers > 1) {
3076 GenCollectedHeap::StrongRootsScope srs(gch);
3077 workers->run_task(&tsk);
3078 } else {
3079 GenCollectedHeap::StrongRootsScope srs(gch);
3080 tsk.work(0);
3081 }
3082 gch->set_par_threads(0);
3083 } else {
3084 // The serial version.
3085 CLDToOopClosure cld_closure(&notOlder, true);
3086 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3087 gch->gen_process_roots(_cmsGen->level(),
3088 true, // younger gens are roots
3089 true, // activate StrongRootsScope
3090 GenCollectedHeap::ScanningOption(roots_scanning_options()),
3091 should_unload_classes(),
3092 &notOlder,
3093 NULL,
3094 &cld_closure);
3095 }
3096 }
3097
3098 // Clear mod-union table; it will be dirtied in the prologue of
3099 // CMS generation per each younger generation collection.
3100
3101 assert(_modUnionTable.isAllClear(),
3102 "Was cleared in most recent final checkpoint phase"
3103 " or no bits are set in the gc_prologue before the start of the next "
3104 "subsequent marking phase.");
3105
3106 assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3107
3108 // Save the end of the used_region of the constituent generations
3109 // to be used to limit the extent of sweep in each generation.
3110 save_sweep_limits();
4504 // ---------- young gen roots --------------
4505 {
4506 work_on_young_gen_roots(worker_id, &par_mri_cl);
4507 _timer.stop();
4508 if (PrintCMSStatistics != 0) {
4509 gclog_or_tty->print_cr(
4510 "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
4511 worker_id, _timer.seconds());
4512 }
4513 }
4514
4515 // ---------- remaining roots --------------
4516 _timer.reset();
4517 _timer.start();
4518
4519 CLDToOopClosure cld_closure(&par_mri_cl, true);
4520
4521 gch->gen_process_roots(_collector->_cmsGen->level(),
4522 false, // yg was scanned above
4523 false, // this is parallel code
4524 GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4525 _collector->should_unload_classes(),
4526 &par_mri_cl,
4527 NULL,
4528 &cld_closure);
4529 assert(_collector->should_unload_classes()
4530 || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4531 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4532 _timer.stop();
4533 if (PrintCMSStatistics != 0) {
4534 gclog_or_tty->print_cr(
4535 "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
4536 worker_id, _timer.seconds());
4537 }
4538 }
4539
4540 // Parallel remark task
4541 class CMSParRemarkTask: public CMSParMarkTask {
4542 CompactibleFreeListSpace* _cms_space;
4543
4544 // The per-thread work queues, available here for stealing.
4545 OopTaskQueueSet* _task_queues;
4546 ParallelTaskTerminator _term;
4547
4548 public:
4549 // A value of 0 passed to n_workers will cause the number of
4550 // workers to be taken from the active workers in the work gang.
4640 // coarsely partitioned and may, on that account, constitute
4641 // the critical path; thus, it's best to start off that
4642 // work first.
4643 // ---------- young gen roots --------------
4644 {
4645 work_on_young_gen_roots(worker_id, &par_mrias_cl);
4646 _timer.stop();
4647 if (PrintCMSStatistics != 0) {
4648 gclog_or_tty->print_cr(
4649 "Finished young gen rescan work in %dth thread: %3.3f sec",
4650 worker_id, _timer.seconds());
4651 }
4652 }
4653
4654 // ---------- remaining roots --------------
4655 _timer.reset();
4656 _timer.start();
4657 gch->gen_process_roots(_collector->_cmsGen->level(),
4658 false, // yg was scanned above
4659 false, // this is parallel code
4660 GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4661 _collector->should_unload_classes(),
4662 &par_mrias_cl,
4663 NULL,
4664 NULL); // The dirty klasses will be handled below
4665
4666 assert(_collector->should_unload_classes()
4667 || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4668 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4669 _timer.stop();
4670 if (PrintCMSStatistics != 0) {
4671 gclog_or_tty->print_cr(
4672 "Finished remaining root rescan work in %dth thread: %3.3f sec",
4673 worker_id, _timer.seconds());
4674 }
4675
4676 // ---------- unhandled CLD scanning ----------
4677 if (worker_id == 0) { // Single threaded at the moment.
4678 _timer.reset();
4679 _timer.start();
4680
4681 // Scan all new class loader data objects and new dependencies that were
4682 // introduced during concurrent marking.
4683 ResourceMark rm;
4684 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4685 for (int i = 0; i < array->length(); i++) {
4686 par_mrias_cl.do_class_loader_data(array->at(i));
4687 }
5231 markFromDirtyCardsClosure.num_dirty_cards());
5232 }
5233 }
5234 }
5235 if (VerifyDuringGC &&
5236 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5237 HandleMark hm; // Discard invalid handles created during verification
5238 Universe::verify();
5239 }
5240 {
5241 GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5242
5243 verify_work_stacks_empty();
5244
5245 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5246 GenCollectedHeap::StrongRootsScope srs(gch);
5247
5248 gch->gen_process_roots(_cmsGen->level(),
5249 true, // younger gens as roots
5250 false, // use the local StrongRootsScope
5251 GenCollectedHeap::ScanningOption(roots_scanning_options()),
5252 should_unload_classes(),
5253 &mrias_cl,
5254 NULL,
5255 NULL); // The dirty klasses will be handled below
5256
5257 assert(should_unload_classes()
5258 || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5259 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5260 }
5261
5262 {
5263 GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5264
5265 verify_work_stacks_empty();
5266
5267 // Scan all class loader data objects that might have been introduced
5268 // during concurrent marking.
5269 ResourceMark rm;
5270 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5271 for (int i = 0; i < array->length(); i++) {
5272 mrias_cl.do_class_loader_data(array->at(i));
5273 }
5274
5275 // We don't need to keep track of new CLDs anymore.
5276 ClassLoaderDataGraph::remember_new_clds(false);
5277
5278 verify_work_stacks_empty();