397 _markBitMap1(),
398 _markBitMap2(),
399 _parallel_marking_threads(0),
400 _max_parallel_marking_threads(0),
401 _sleep_factor(0.0),
402 _marking_task_overhead(1.0),
403 _cleanup_list("Cleanup List"),
404
405 _prevMarkBitMap(&_markBitMap1),
406 _nextMarkBitMap(&_markBitMap2),
407
408 _global_mark_stack(),
409 // _finger set in set_non_marking_state
410
411 _max_worker_id(ParallelGCThreads),
412 // _active_tasks set in set_non_marking_state
413 // _tasks set inside the constructor
414 _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
415 _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
416
417 _has_overflown(false),
418 _concurrent(false),
419 _has_aborted(false),
420 _restart_for_overflow(false),
421 _concurrent_marking_in_progress(false),
422 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
423 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
424
425 // _verbose_level set below
426
427 _init_times(),
428 _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
429 _cleanup_times(),
430 _total_counting_time(0.0),
431 _total_rs_scrub_time(0.0),
432
433 _parallel_workers(NULL),
434
435 _completed_initialization(false) {
436
437 _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
575 assert(_heap_start != NULL, "heap bounds should look ok");
576 assert(_heap_end != NULL, "heap bounds should look ok");
577 assert(_heap_start < _heap_end, "heap bounds should look ok");
578
579 // Reset all the marking data structures and any necessary flags
580 reset_marking_state();
581
582 // We do reset all of them, since different phases will use
583 // different numbers of active threads. So, it's easiest to have all
584 // of them ready.
585 for (uint i = 0; i < _max_worker_id; ++i) {
586 _tasks[i]->reset(_nextMarkBitMap);
587 }
588
589 // we need this to make sure that the flag is on during the evac
590 // pause with the initial mark piggy-backed.
591 set_concurrent_marking_in_progress();
592 }
593
594
595 void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
596 _global_mark_stack.set_should_expand(has_overflown());
597 _global_mark_stack.set_empty(); // Also clears the mark stack's overflow flag
598 if (clear_overflow) {
599 clear_has_overflown();
600 } else {
601 assert(has_overflown(), "pre-condition");
602 }
603 _finger = _heap_start;
604
605 for (uint i = 0; i < _max_worker_id; ++i) {
606 G1CMTaskQueue* queue = _task_queues->queue(i);
607 queue->set_empty();
608 }
609 }
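// A minimal standalone model of the clear_overflow contract above, under the
// assumption (based on the two call sites visible in this diff) that only the
// concurrent-phase overflow path passes true, while the remark path relies on
// has_overflown() staying set so the pause can be aborted and concurrent
// marking restarted. All names below are illustrative, not HotSpot API.

#include <atomic>
#include <cassert>

namespace cm_sketch {

std::atomic<bool> has_overflown{false};

void reset_marking_state(bool clear_overflow) {
  // ... empty the global stack, reset the finger and the task queues,
  //     as the method above does ...
  if (clear_overflow) {
    has_overflown.store(false);    // concurrent phase: safe to clear here
  } else {
    assert(has_overflown.load());  // remark: the flag must survive the reset
  }
}

} // namespace cm_sketch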
610
611 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
612 assert(active_tasks <= _max_worker_id, "we should not have more");
613
614 _active_tasks = active_tasks;
615 // Need to update the three data structures below according to the
616 // number of active threads for this phase.
617 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
618 _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
619 _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
620 }
621
622 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
866 if (barrier_aborted) {
867 // If the barrier aborted we ignore the overflow condition and
868 // just abort the whole marking phase as quickly as possible.
869 return;
870 }
871
872 // If we're executing the concurrent phase of marking, reset the marking
873 // state; otherwise the marking state is reset after reference processing,
874 // during the remark pause.
875 // If we were to reset here as a result of an overflow during the remark,
876 // we would see assertion failures from any subsequent
877 // set_concurrency_and_phase() calls.
878 if (concurrent()) {
879 // let the task associated with worker 0 do this
880 if (worker_id == 0) {
881 // task 0 is responsible for clearing the global data structures.
882 // We should be here because of an overflow. During STW we should
883 // not clear the overflow flag since we rely on it being true when
884 // we exit this method to abort the pause and restart concurrent
885 // marking.
886 reset_marking_state(true /* clear_overflow */);
887
888 log_info(gc, marking)("Concurrent Mark reset for overflow");
889 }
890 }
891
892 // after this, each task should reset its own data structures and
893 // then go into the second barrier
894 }
895
896 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
897 SuspendibleThreadSetLeaver sts_leave(concurrent());
898 _second_overflow_barrier_sync.enter();
899
900 // at this point everything should be re-initialized and ready to go
901 }
902
903 class G1CMConcurrentMarkingTask: public AbstractGangTask {
904 private:
905 G1ConcurrentMark* _cm;
906 ConcurrentMarkThread* _cmt;
1735 // the number of active workers. This is OK as long as the discovered
1736 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1737 rp->set_active_mt_degree(active_workers);
1738
1739 // Process the weak references.
1740 const ReferenceProcessorStats& stats =
1741 rp->process_discovered_references(&g1_is_alive,
1742 &g1_keep_alive,
1743 &g1_drain_mark_stack,
1744 executor,
1745 _gc_timer_cm);
1746 _gc_tracer_cm->report_gc_reference_stats(stats);
1747
1748 // The do_oop work routines of the keep_alive and drain_marking_stack
1749 // oop closures will set the has_overflown flag if we overflow the
1750 // global marking stack.
1751
1752 assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(),
1753 "Mark stack should be empty (unless it is out of memory)");
1754
1755 if (_global_mark_stack.is_out_of_memory()) {
1756 // This should have been done already when we tried to push an
1757 // entry onto the global mark stack. But let's do it again.
1758 set_has_overflown();
1759 }
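// This re-derivation appears to be defensive: as the comment above notes, the
// push path should already have raised has_overflown when the global stack ran
// out of memory, so repeating it here merely makes the remark path independent
// of that ordering.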
1760
1761 assert(rp->num_q() == active_workers, "number of reference queues should match the number of active workers");
1762
1763 rp->enqueue_discovered_references(executor);
1764
1765 rp->verify_no_references_recorded();
1766 assert(!rp->discovery_enabled(), "Post condition");
1767 }
1768
1769 if (has_overflown()) {
1770 // We cannot trust g1_is_alive if the marking stack overflowed
1771 return;
1772 }
1773
1774 assert(_global_mark_stack.is_empty(), "Marking should have completed");
1775
1776 // Unload Klasses, String, Symbols, Code Cache, etc.
1777 if (ClassUnloadingWithConcurrentMark) {
1778 bool purged_classes;
1779
1780 {
2914
2915 if (_worker_id == 0) {
2916 // let's allow task 0 to do this
2917 if (concurrent()) {
2918 assert(_cm->concurrent_marking_in_progress(), "invariant");
2919 // we need to set this to false before the next
2920 // safepoint. This way we ensure that the marking phase
2921 // doesn't observe any more heap expansions.
2922 _cm->clear_concurrent_marking_in_progress();
2923 }
2924 }
2925
2926 // We can now guarantee that the global stack is empty, since
2927 // all other tasks have finished. We separated the guarantees so
2928 // that, if a condition is false, we can immediately find out
2929 // which one.
2930 guarantee(_cm->out_of_regions(), "only way to reach here");
2931 guarantee(_cm->mark_stack_empty(), "only way to reach here");
2932 guarantee(_task_queue->size() == 0, "only way to reach here");
2933 guarantee(!_cm->has_overflown(), "only way to reach here");
2934 guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
2935 } else {
2936 // Apparently there's more work to do. Let's abort this task. It
2937 // will be restarted and we can hopefully find more things to do.
2938 set_has_aborted();
2939 }
2940 }
2941
2942 // Mainly for debugging purposes to make sure that a pointer to the
2943 // closure which was allocated on the stack in this frame doesn't
2944 // escape it by accident.
2945 set_cm_oop_closure(NULL);
2946 double end_time_ms = os::elapsedVTime() * 1000.0;
2947 double elapsed_time_ms = end_time_ms - _start_time_ms;
2948 // Update the step history.
2949 _step_times_ms.add(elapsed_time_ms);
2950
2951 if (has_aborted()) {
2952 // The task was aborted for some reason.
2953 if (_has_timed_out) {
2954 double diff_ms = elapsed_time_ms - _time_target_ms;
397 _markBitMap1(),
398 _markBitMap2(),
399 _parallel_marking_threads(0),
400 _max_parallel_marking_threads(0),
401 _sleep_factor(0.0),
402 _marking_task_overhead(1.0),
403 _cleanup_list("Cleanup List"),
404
405 _prevMarkBitMap(&_markBitMap1),
406 _nextMarkBitMap(&_markBitMap2),
407
408 _global_mark_stack(),
409 // _finger set in set_non_marking_state
410
411 _max_worker_id(ParallelGCThreads),
412 // _active_tasks set in set_non_marking_state
413 // _tasks set inside the constructor
414 _task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
415 _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
416
417 _concurrent(false),
418 _has_aborted(false),
419 _restart_for_overflow(false),
420 _concurrent_marking_in_progress(false),
421 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
422 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),
423
424 // _verbose_level set below
425
426 _init_times(),
427 _remark_times(), _remark_mark_times(), _remark_weak_ref_times(),
428 _cleanup_times(),
429 _total_counting_time(0.0),
430 _total_rs_scrub_time(0.0),
431
432 _parallel_workers(NULL),
433
434 _completed_initialization(false) {
435
436 _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage);
574 assert(_heap_start != NULL, "heap bounds should look ok");
575 assert(_heap_end != NULL, "heap bounds should look ok");
576 assert(_heap_start < _heap_end, "heap bounds should look ok");
577
578 // Reset all the marking data structures and any necessary flags
579 reset_marking_state();
580
581 // We do reset all of them, since different phases will use
582 // different numbers of active threads. So, it's easiest to have all
583 // of them ready.
584 for (uint i = 0; i < _max_worker_id; ++i) {
585 _tasks[i]->reset(_nextMarkBitMap);
586 }
587
588 // we need this to make sure that the flag is on during the evac
589 // pause with the initial mark piggy-backed.
590 set_concurrent_marking_in_progress();
591 }
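// As far as this diff shows, the lifecycle of this flag is: raised here so the
// evac pause with the initial mark piggy-backed observes it as set, and
// lowered again by worker 0 before the next safepoint once marking has
// finished (see the concurrent() check in the worker 0 block further down).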
592
593
594 void G1ConcurrentMark::reset_marking_state() {
595 _global_mark_stack.set_should_expand(has_overflown());
596 _global_mark_stack.set_empty(); // Also clears the mark stack's overflow flag
597 _finger = _heap_start;
598
599 for (uint i = 0; i < _max_worker_id; ++i) {
600 G1CMTaskQueue* queue = _task_queues->queue(i);
601 queue->set_empty();
602 }
603 }
604
605 void G1ConcurrentMark::set_concurrency(uint active_tasks) {
606 assert(active_tasks <= _max_worker_id, "we should not have more");
607
608 _active_tasks = active_tasks;
609 // Need to update the three data structures below according to the
610 // number of active threads for this phase.
611 _terminator = ParallelTaskTerminator((int) active_tasks, _task_queues);
612 _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
613 _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
614 }
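// For illustration (the calling convention is not shown in this diff, so this
// is hedged): a phase with n active threads is presumably started via
//
//   set_concurrency_and_phase(n, concurrent);
//
// which funnels through this method, so the terminator and both overflow
// barriers are re-armed with the same worker count before any task can reach
// them with a stale count from a previous phase.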
615
616 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
860 if (barrier_aborted) {
861 // If the barrier aborted we ignore the overflow condition and
862 // just abort the whole marking phase as quickly as possible.
863 return;
864 }
865
866 // If we're executing the concurrent phase of marking, reset the marking
867 // state; otherwise the marking state is reset after reference processing,
868 // during the remark pause.
869 // If we were to reset here as a result of an overflow during the remark,
870 // we would see assertion failures from any subsequent
871 // set_concurrency_and_phase() calls.
872 if (concurrent()) {
873 // let the task associated with worker 0 do this
874 if (worker_id == 0) {
875 // task 0 is responsible for clearing the global data structures.
876 // We should be here because of an overflow. During STW we should
877 // not clear the overflow flag since we rely on it being true when
878 // we exit this method to abort the pause and restart concurrent
879 // marking.
880 reset_marking_state();
881
882 log_info(gc, marking)("Concurrent Mark reset for overflow");
883 }
884 }
885
886 // after this, each task should reset its own data structures and
887 // then go into the second barrier
888 }
889
890 void G1ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
891 SuspendibleThreadSetLeaver sts_leave(concurrent());
892 _second_overflow_barrier_sync.enter();
893
894 // at this point everything should be re-initialized and ready to go
895 }
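// A self-contained sketch of the two-barrier overflow protocol implemented by
// the two barrier methods above, using C++20 std::barrier in place of
// HotSpot's barrier-sync machinery. The structure mirrors the source: all
// tasks rendezvous at the first barrier, worker 0 resets the shared marking
// state in between, and the second barrier releases everyone only once that
// reset is complete. The names here are illustrative, not HotSpot API.

#include <barrier>
#include <cstdio>
#include <thread>
#include <vector>

static void overflow_handshake(unsigned nworkers) {
  std::barrier<> first(nworkers);   // stands in for _first_overflow_barrier_sync
  std::barrier<> second(nworkers);  // stands in for _second_overflow_barrier_sync
  std::vector<std::thread> workers;
  for (unsigned id = 0; id < nworkers; id++) {
    workers.emplace_back([&first, &second, id] {
      first.arrive_and_wait();      // everyone stops after noticing the overflow
      if (id == 0) {
        std::puts("worker 0: reset the global marking state");
      }
      second.arrive_and_wait();     // no task passes until worker 0 has arrived,
                                    // i.e. until the global reset is done
      // ... each task then resets its own data structures and resumes ...
    });
  }
  for (auto& w : workers) {
    w.join();
  }
}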
896
897 class G1CMConcurrentMarkingTask: public AbstractGangTask {
898 private:
899 G1ConcurrentMark* _cm;
900 ConcurrentMarkThread* _cmt;
1729 // the number of active workers. This is OK as long as the discovered
1730 // Reference lists are balanced (see balance_all_queues() and balance_queues()).
1731 rp->set_active_mt_degree(active_workers);
1732
1733 // Process the weak references.
1734 const ReferenceProcessorStats& stats =
1735 rp->process_discovered_references(&g1_is_alive,
1736 &g1_keep_alive,
1737 &g1_drain_mark_stack,
1738 executor,
1739 _gc_timer_cm);
1740 _gc_tracer_cm->report_gc_reference_stats(stats);
1741
1742 // The do_oop work routines of the keep_alive and drain_marking_stack
1743 // oop closures will set the has_overflown flag if we overflow the
1744 // global marking stack.
1745
1746 assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(),
1747 "Mark stack should be empty (unless it is out of memory)");
1748
1749 assert(rp->num_q() == active_workers, "number of reference queues should match the number of active workers");
1750
1751 rp->enqueue_discovered_references(executor);
1752
1753 rp->verify_no_references_recorded();
1754 assert(!rp->discovery_enabled(), "Post condition");
1755 }
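// The three closures passed to process_discovered_references() above divide
// the work as their names suggest: g1_is_alive answers liveness queries
// against the marking information, g1_keep_alive marks referents that must be
// kept alive, and g1_drain_mark_stack drains the marking work that keep-alive
// generates. As the comment above notes, the latter two can set has_overflown
// if the global stack overflows, which is why the check just below cannot be
// skipped.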
1756
1757 if (has_overflown()) {
1758 // We cannot trust g1_is_alive if the marking stack overflowed
1759 return;
1760 }
1761
1762 assert(_global_mark_stack.is_empty(), "Marking should have completed");
1763
1764 // Unload Klasses, String, Symbols, Code Cache, etc.
1765 if (ClassUnloadingWithConcurrentMark) {
1766 bool purged_classes;
1767
1768 {
2902
2903 if (_worker_id == 0) {
2904 // let's allow task 0 to do this
2905 if (concurrent()) {
2906 assert(_cm->concurrent_marking_in_progress(), "invariant");
2907 // we need to set this to false before the next
2908 // safepoint. This way we ensure that the marking phase
2909 // doesn't observe any more heap expansions.
2910 _cm->clear_concurrent_marking_in_progress();
2911 }
2912 }
2913
2914 // We can now guarantee that the global stack is empty, since
2915 // all other tasks have finished. We separated the guarantees so
2916 // that, if a condition is false, we can immediately find out
2917 // which one.
2918 guarantee(_cm->out_of_regions(), "only way to reach here");
2919 guarantee(_cm->mark_stack_empty(), "only way to reach here");
2920 guarantee(_task_queue->size() == 0, "only way to reach here");
2921 guarantee(!_cm->has_overflown(), "only way to reach here");
2922 } else {
2923 // Apparently there's more work to do. Let's abort this task. It
2924 // will be restarted and we can hopefully find more things to do.
2925 set_has_aborted();
2926 }
2927 }
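// A hedged aside on the choice of guarantee() over assert() above: in HotSpot,
// guarantee() is checked in product builds as well as debug builds, which fits
// the intent stated in the comment above: if one of these termination
// invariants is ever false, the VM should stop and identify exactly which one,
// even in a release build.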
2928
2929 // Mainly for debugging purposes to make sure that a pointer to the
2930 // closure which was allocated on the stack in this frame doesn't
2931 // escape it by accident.
2932 set_cm_oop_closure(NULL);
2933 double end_time_ms = os::elapsedVTime() * 1000.0;
2934 double elapsed_time_ms = end_time_ms - _start_time_ms;
2935 // Update the step history.
2936 _step_times_ms.add(elapsed_time_ms);
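// For context (hedged): os::elapsedVTime() returns the current thread's CPU
// time in seconds, hence the * 1000.0 conversions above; _step_times_ms
// therefore tracks the CPU cost of each marking step, which the timeout path
// below compares against the step's time target (_time_target_ms).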
2937
2938 if (has_aborted()) {
2939 // The task was aborted for some reason.
2940 if (_has_timed_out) {
2941 double diff_ms = elapsed_time_ms - _time_target_ms;