// ===== Original version =====

  _root_regions(_g1h->max_regions()),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _worker_id_offset(G1DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator((int) _max_num_tasks, _task_queues),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _has_aborted(false),
  _restart_for_overflow(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_cleanup_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0),

  _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
// ...

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());

    _accum_task_vtime[i] = 0.0;
  }

  reset_at_marking_complete();
}

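// Resets the internal marking state for a new cycle: clears the abort flag,
// re-initializes every marking task against the next bitmap, and clears the
// per-region top-at-rebuild-start pointers and mark statistics.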
void G1ConcurrentMark::reset() {
  _has_aborted = false;

  reset_marking_for_restart();

  // Reset all tasks, since different phases will use different numbers of
  // active threads. So, it's easiest to have all of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < max_regions; i++) {
    _top_at_rebuild_starts[i] = NULL;
    _region_mark_stats[i].clear();
  }
}

void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
  for (uint j = 0; j < _max_num_tasks; ++j) {
    _tasks[j]->clear_mark_stats_cache(region_idx);
  }

// ...

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

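// Records the start of a concurrent cycle with the concurrent GC timer and tracer.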
void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

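// Records the end of a concurrent cycle; an aborted cycle is reported to the
// tracer as a concurrent mode failure.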
void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->collector_state()->set_clearing_next_bitmap(false);

  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (has_aborted()) {
    log_info(gc, marking)("Concurrent Mark Abort");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

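// Performs the concurrent marking phase, first deciding how many of the
// concurrent workers to activate for marking.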
void G1ConcurrentMark::mark_from_roots() {
  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

// ...

    }
  }
}
#endif // PRODUCT

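// Delegates the concurrent remembered set rebuild to the remembered set,
// running it on the concurrent marking workers.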
void G1ConcurrentMark::rebuild_rem_set_concurrently() {
  _g1h->rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
}

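// Prints per-task marking statistics when the gc+stats debug log is enabled.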
void G1ConcurrentMark::print_stats() {
  if (!log_is_enabled(Debug, gc, stats)) {
    return;
  }
  log_debug(gc, stats)("---------------------------------------------------------------------");
  for (size_t i = 0; i < _num_active_tasks; ++i) {
    _tasks[i]->print_stats();
    log_debug(gc, stats)("---------------------------------------------------------------------");
  }
}

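// Aborts an in-progress concurrent cycle: clears the next bitmap, discards
// pending marking work, releases the overflow barriers and deactivates the
// SATB queues of all threads.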
void G1ConcurrentMark::concurrent_cycle_abort() {
  if (!cm_thread()->during_cycle() || _has_aborted) {
    // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
    return;
  }

  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
  // concurrent bitmap clearing.
  {
    GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
    clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
  }
  // Note we cannot clear the previous marking bitmap here
  // since VerifyDuringGC verifies the objects marked during
  // a full GC against the previous bitmap.

  // Empty the mark stack.
  reset_marking_for_restart();
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->clear_region_fields();
  }
  _first_overflow_barrier_sync.abort();
  _second_overflow_barrier_sync.abort();
  _has_aborted = true;

  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking; we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(
    false, /* new active value */
    satb_mq_set.is_active() /* expected_active */);
}

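// Logs summary statistics (count, total, average, standard deviation and
// maximum) for a sequence of phase times measured in milliseconds.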
static void print_ms_time_info(const char* prefix, const char* name,
                               NumberSeq& ns) {
  log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
                         prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
  if (ns.num() > 0) {
    log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]",
                           prefix, ns.sd(), ns.maximum());
  }
}

// ===== Revised version =====

  _root_regions(_g1h->max_regions()),

  _global_mark_stack(),

  // _finger set in set_non_marking_state

  _worker_id_offset(G1DirtyCardQueueSet::num_par_ids() + G1ConcRefinementThreads),
  _max_num_tasks(ParallelGCThreads),
  // _num_active_tasks set in set_non_marking_state()
  // _tasks set inside the constructor

  _task_queues(new G1CMTaskQueueSet((int) _max_num_tasks)),
  _terminator((int) _max_num_tasks, _task_queues),

  _first_overflow_barrier_sync(),
  _second_overflow_barrier_sync(),

  _has_overflown(false),
  _concurrent(false),
  _aborted_by_fullgc(false),
  _aborted_by_initial_mark(false),
  _restart_for_overflow(false),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()),

  // _verbose_level set below

  _init_times(),
  _remark_times(),
  _remark_mark_times(),
  _remark_weak_ref_times(),
  _cleanup_times(),
  _total_cleanup_time(0.0),

  _accum_task_vtime(NULL),

  _concurrent_workers(NULL),
  _num_concurrent_workers(0),
  _max_concurrent_workers(0),

  _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_regions(), mtGC)),
// ...

  _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_num_tasks, mtGC);
  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_num_tasks, mtGC);

  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  _num_active_tasks = _max_num_tasks;

  for (uint i = 0; i < _max_num_tasks; ++i) {
    G1CMTaskQueue* task_queue = new G1CMTaskQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);

    _tasks[i] = new G1CMTask(i, this, task_queue, _region_mark_stats, _g1h->max_regions());

    _accum_task_vtime[i] = 0.0;
  }

  reset_at_marking_complete();
}

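// Resets the internal marking state for a new cycle: clears both abort flags,
// re-initializes every marking task against the next bitmap, and clears the
// per-region top-at-rebuild-start pointers and mark statistics.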
void G1ConcurrentMark::reset() {
  _aborted_by_fullgc = false;
  _aborted_by_initial_mark = false;

  reset_marking_for_restart();

  // Reset all tasks, since different phases will use different numbers of
  // active threads. So, it's easiest to have all of them ready.
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->reset(_next_mark_bitmap);
  }

  uint max_regions = _g1h->max_regions();
  for (uint i = 0; i < max_regions; i++) {
    _top_at_rebuild_starts[i] = NULL;
    _region_mark_stats[i].clear();
  }
}

void G1ConcurrentMark::clear_statistics_in_region(uint region_idx) {
  for (uint j = 0; j < _max_num_tasks; ++j) {
    _tasks[j]->clear_mark_stats_cache(region_idx);
  }

// ...

    // It's possible that has_aborted() is true here without actually
    // aborting the survivor scan earlier. This is OK as it's
    // mainly used for sanity checking.
    root_regions()->scan_finished();
  }
}

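// Records the start of a concurrent cycle with the concurrent GC timer and tracer.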
void G1ConcurrentMark::concurrent_cycle_start() {
  _gc_timer_cm->register_gc_start();

  _gc_tracer_cm->report_gc_start(GCCause::_no_gc /* first parameter is not used */, _gc_timer_cm->gc_start());

  _g1h->trace_heap_before_gc(_gc_tracer_cm);
}

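// Records the end of a concurrent cycle. An abort caused by a full GC is
// reported to the tracer as a concurrent mode failure, while an abort caused
// by an initial mark pause is only logged.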
void G1ConcurrentMark::concurrent_cycle_end() {
  _g1h->collector_state()->set_clearing_next_bitmap(false);

  _g1h->trace_heap_after_gc(_gc_tracer_cm);

  if (aborted_by_fullgc()) {
    log_info(gc, marking)("Concurrent Mark Abort due to Full GC");
    _gc_tracer_cm->report_concurrent_mode_failure();
  } else if (aborted_by_initial_mark()) {
    log_info(gc, marking)("Concurrent Mark Abort due to Humongous Reclaim");
  }

  _gc_timer_cm->register_gc_end();

  _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
}

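// Performs the concurrent marking phase, first deciding how many of the
// concurrent workers to activate for marking.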
void G1ConcurrentMark::mark_from_roots() {
  _restart_for_overflow = false;

  _num_concurrent_workers = calc_active_marking_workers();

  uint active_workers = MAX2(1U, _num_concurrent_workers);

  // Setting active workers is not guaranteed since fewer
  // worker threads may currently exist and more may not be
  // available.
  active_workers = _concurrent_workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for marking", active_workers, _concurrent_workers->total_workers());

// ...

    }
  }
}
#endif // PRODUCT

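// Delegates the concurrent remembered set rebuild to the remembered set,
// running it on the concurrent marking workers.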
void G1ConcurrentMark::rebuild_rem_set_concurrently() {
  _g1h->rem_set()->rebuild_rem_set(this, _concurrent_workers, _worker_id_offset);
}

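// Prints per-task marking statistics when the gc+stats debug log is enabled.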
void G1ConcurrentMark::print_stats() {
  if (!log_is_enabled(Debug, gc, stats)) {
    return;
  }
  log_debug(gc, stats)("---------------------------------------------------------------------");
  for (size_t i = 0; i < _num_active_tasks; ++i) {
    _tasks[i]->print_stats();
    log_debug(gc, stats)("---------------------------------------------------------------------");
  }
}

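// Aborts the cycle on behalf of an initial mark pause: records the abort
// reason and clears the in-initial-mark flag in the collector state.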
void G1ConcurrentMark::concurrent_cycle_abort_by_initial_mark() {
  _aborted_by_initial_mark = true;
  _g1h->collector_state()->set_in_initial_mark_gc(false);
}

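// Aborts an in-progress concurrent cycle for a full GC: clears the next
// bitmap, discards pending marking work, releases the overflow barriers and
// deactivates the SATB queues of all threads.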
void G1ConcurrentMark::concurrent_cycle_abort_by_fullgc() {
  if (!cm_thread()->during_cycle() || _aborted_by_fullgc) {
    // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
    return;
  }

  // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
  // concurrent bitmap clearing.
  {
    GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
    clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
  }
  // Note we cannot clear the previous marking bitmap here
  // since VerifyDuringGC verifies the objects marked during
  // a full GC against the previous bitmap.

  // Empty the mark stack.
  reset_marking_for_restart();
  for (uint i = 0; i < _max_num_tasks; ++i) {
    _tasks[i]->clear_region_fields();
  }
  _first_overflow_barrier_sync.abort();
  _second_overflow_barrier_sync.abort();
  _aborted_by_fullgc = true;

  SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set();
  satb_mq_set.abandon_partial_marking();
  // This can be called either during or outside marking; we'll read
  // the expected_active value from the SATB queue set.
  satb_mq_set.set_active_all_threads(
    false, /* new active value */
    satb_mq_set.is_active() /* expected_active */);
}

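// Logs summary statistics (count, total, average, standard deviation and
// maximum) for a sequence of phase times measured in milliseconds.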
static void print_ms_time_info(const char* prefix, const char* name,
                               NumberSeq& ns) {
  log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
                         prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
  if (ns.num() > 0) {
    log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]",
                           prefix, ns.sd(), ns.maximum());
  }
}
