--- old/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp 2015-01-27 17:28:12.918323936 -0500
+++ new/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp 2015-01-27 17:28:12.634307515 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -762,8 +762,11 @@
 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
   assert_lock_strong(freelistLock());
   HeapWord *cur, *limit;
-  for (cur = bottom(), limit = end(); cur < limit;
-       cur += cl->do_blk_careful(cur));
+  size_t res;
+  for (cur = bottom(), limit = end(); cur < limit; cur += res) {
+    res = cl->do_blk_careful(cur);
+    assert(cur + res > cur, "Not monotonically increasing?");
+  }
 }
 
 // Apply the given closure to each block in the space.
--- old/src/share/vm/gc_implementation/g1/g1Allocator.cpp 2015-01-27 17:28:13.530359321 -0500
+++ new/src/share/vm/gc_implementation/g1/g1Allocator.cpp 2015-01-27 17:28:13.426353307 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,7 @@
     // we allocate to in the region sets. We'll re-add it later, when
     // it's retired again.
     _g1h->_old_set.remove(retained_region);
-    bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
+    bool during_im = _g1h->g1_policy()->collector_state()->during_initial_mark_pause();
     retained_region->note_start_of_copying(during_im);
     old->set(retained_region);
     _g1h->_hr_printer.reuse(retained_region);
--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp 2015-01-27 17:28:14.222399331 -0500
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp 2015-01-27 17:28:13.870378979 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -361,6 +361,23 @@
   reset_from_card_cache(start_idx, num_regions);
 }
 
+////////////////////// G1CollectedHeap methods ////////////////////////////////
+
+// Records the fact that a marking phase is no longer in progress.
+void G1CollectedHeap::set_marking_complete() {
+  g1_policy()->collector_state()->set_marking_complete();
+}
+
+// Records the fact that a marking phase has commenced.
+void G1CollectedHeap::set_marking_started() {
+  g1_policy()->collector_state()->set_marking_started();
+}
+
+// Returns whether a marking phase is currently in progress.
+bool G1CollectedHeap::mark_in_progress() {
+  return g1_policy()->collector_state()->mark_in_progress();
+}
+
 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) {
   // Claim the right to put the region on the dirty cards region list
@@ -1076,7 +1093,7 @@
   } else {
     HeapWord* result = humongous_obj_allocate(word_size, context);
     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
-      g1_policy()->set_initiate_conc_mark_if_possible();
+      g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);
     }
     return result;
   }
@@ -1288,7 +1305,7 @@
     g1_policy()->stop_incremental_cset_building();
 
     tear_down_region_sets(false /* free_list_only */);
-    g1_policy()->set_gcs_are_young(true);
+    g1_policy()->collector_state()->set_gcs_are_young(true);
 
     // See the comments in g1CollectedHeap.hpp and
     // G1CollectedHeap::ref_processing_init() about
@@ -1770,7 +1787,6 @@
   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
   _bot_shared(NULL),
   _evac_failure_scan_stack(NULL),
-  _mark_in_progress(false),
   _cg1r(NULL),
   _g1mm(NULL),
   _refine_cte_cl(NULL),
@@ -2378,8 +2394,8 @@
 }
 
 G1YCType G1CollectedHeap::yc_type() {
-  bool is_young = g1_policy()->gcs_are_young();
-  bool is_initial_mark = g1_policy()->during_initial_mark_pause();
+  bool is_young = g1_policy()->collector_state()->gcs_are_young();
+  bool is_initial_mark = g1_policy()->collector_state()->during_initial_mark_pause();
   bool is_during_mark = mark_in_progress();
 
   if (is_initial_mark) {
@@ -3663,8 +3679,8 @@
       gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
 
       GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
-        .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
-        .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
+        .append(g1_policy()->collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
+        .append(g1_policy()->collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
 
       gclog_or_tty->print("[%s", (const char*)gc_cause_str);
     }
@@ -3720,22 +3736,22 @@
   g1_policy()->decide_on_conc_mark_initiation();
 
   // We do not allow initial-mark to be piggy-backed on a mixed GC.
-  assert(!g1_policy()->during_initial_mark_pause() ||
-          g1_policy()->gcs_are_young(), "sanity");
+  assert(!g1_policy()->collector_state()->during_initial_mark_pause() ||
+          g1_policy()->collector_state()->gcs_are_young(), "sanity");
 
   // We also do not allow mixed GCs during marking.
-  assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
+  assert(!mark_in_progress() || g1_policy()->collector_state()->gcs_are_young(), "sanity");
 
   // Record whether this pause is an initial mark. When the current
   // thread has completed its logging output and it's safe to signal
   // the CM thread, the flag's value in the policy has been reset.
-  bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
+  bool should_start_conc_mark = g1_policy()->collector_state()->during_initial_mark_pause();
 
   // Inner scope for scope based logging, timers, and stats collection
   {
     EvacuationInfo evacuation_info;
 
-    if (g1_policy()->during_initial_mark_pause()) {
+    if (g1_policy()->collector_state()->during_initial_mark_pause()) {
       // We are about to start a marking cycle, so we increment the
      // full collection counter.
      increment_old_marking_cycles_started();
@@ -3842,7 +3858,7 @@
       _young_list->print();
 #endif // YOUNG_LIST_VERBOSE
 
-      if (g1_policy()->during_initial_mark_pause()) {
+      if (g1_policy()->collector_state()->during_initial_mark_pause()) {
        concurrent_mark()->checkpointRootsInitialPre();
      }
 
@@ -3946,7 +3962,7 @@
        _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
      }
 
-      if (g1_policy()->during_initial_mark_pause()) {
+      if (g1_policy()->collector_state()->during_initial_mark_pause()) {
        // We have to do this before we notify the CM threads that
        // they can start working to make sure that all the
        // appropriate initialization is done on the CM object.
@@ -4495,7 +4511,7 @@
 
    pss.set_evac_failure_closure(&evac_failure_cl);
 
-    bool only_young = _g1h->g1_policy()->gcs_are_young();
+    bool only_young = _g1h->g1_policy()->collector_state()->gcs_are_young();
 
    // Non-IM young GC.
    G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, &pss, rp);
@@ -4524,7 +4540,7 @@
    CLDClosure* weak_cld_cl;
    CodeBlobClosure* strong_code_cl;
 
-    if (_g1h->g1_policy()->during_initial_mark_pause()) {
+    if (_g1h->g1_policy()->collector_state()->during_initial_mark_pause()) {
      // We also need to mark copied objects.
      strong_root_cl = &scan_mark_root_cl;
      strong_cld_cl = &scan_mark_cld_cl;
@@ -4605,7 +4621,7 @@
  double ext_roots_start = os::elapsedTime();
  double closure_app_time_sec = 0.0;
 
-  bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
+  bool during_im = _g1h->g1_policy()->collector_state()->during_initial_mark_pause();
  bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
 
  BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
@@ -5295,7 +5311,7 @@
 
    OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
 
-    if (_g1h->g1_policy()->during_initial_mark_pause()) {
+    if (_g1h->g1_policy()->collector_state()->during_initial_mark_pause()) {
      // We also need to mark copied objects.
      copy_non_heap_cl = &copy_mark_non_heap_cl;
    }
@@ -5400,7 +5416,7 @@
 
  OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
 
-  if (_g1h->g1_policy()->during_initial_mark_pause()) {
+  if (_g1h->g1_policy()->collector_state()->during_initial_mark_pause()) {
    // We also need to mark copied objects.
    copy_non_heap_cl = &copy_mark_non_heap_cl;
  }
@@ -5515,7 +5531,7 @@
 
    OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
 
-    if (_g1h->g1_policy()->during_initial_mark_pause()) {
+    if (_g1h->g1_policy()->collector_state()->during_initial_mark_pause()) {
      // We also need to mark copied objects.
      copy_non_heap_cl = &copy_mark_non_heap_cl;
    }
@@ -5633,7 +5649,7 @@
  {
    StrongRootsScope srs(this);
    // InitialMark needs claim bits to keep track of the marked-through CLDs.
-    if (g1_policy()->during_initial_mark_pause()) {
+    if (g1_policy()->collector_state()->during_initial_mark_pause()) {
      ClassLoaderDataGraph::clear_claimed_marks();
    }
 
@@ -6586,7 +6602,7 @@
      _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
      check_bitmaps("Old Region Allocation", new_alloc_region);
    }
-    bool during_im = g1_policy()->during_initial_mark_pause();
+    bool during_im = g1_policy()->collector_state()->during_initial_mark_pause();
    new_alloc_region->note_start_of_copying(during_im);
    return new_alloc_region;
  }
@@ -6597,7 +6613,7 @@
 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                             size_t allocated_bytes,
                                             InCSetState dest) {
-  bool during_im = g1_policy()->during_initial_mark_pause();
+  bool during_im = g1_policy()->collector_state()->during_initial_mark_pause();
  alloc_region->note_end_of_copying(during_im);
  g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
  if (dest.is_young()) {
--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp 2015-01-27 17:28:14.890437954 -0500
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp 2015-01-27 17:28:14.778431479 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -800,7 +800,6 @@
  // The concurrent marker (and the thread it runs in.)
  ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmThread;
-  bool _mark_in_progress;
 
  // The concurrent refiner.
  ConcurrentG1Refine* _cg1r;
@@ -1443,15 +1442,9 @@
  void markModUnionRange(MemRegion mr);
 
  // Records the fact that a marking phase is no longer in progress.
-  void set_marking_complete() {
-    _mark_in_progress = false;
-  }
-  void set_marking_started() {
-    _mark_in_progress = true;
-  }
-  bool mark_in_progress() {
-    return _mark_in_progress;
-  }
+  void set_marking_complete();
+  void set_marking_started();
+  bool mark_in_progress();
 
  // Print the maximum heap capacity.
  virtual size_t max_capacity() const;
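
[Aside on the header change above: the three marking accessors keep their exact signatures on G1CollectedHeap, but their bodies move out of line (see the g1CollectedHeap.cpp hunk at @@ -361) and simply forward to the policy's state object, so no caller has to change. A minimal standalone sketch of that delegation pattern, compressed into one file; Heap, Policy, and State are illustrative stand-ins, not the HotSpot classes.]

#include <cassert>

// State owns the flag; Heap keeps its old public API and merely delegates.
class State {
  bool _mark_in_progress;
 public:
  State() : _mark_in_progress(false) {}
  void set_marking_started()  { _mark_in_progress = true; }
  void set_marking_complete() { _mark_in_progress = false; }
  bool mark_in_progress()     { return _mark_in_progress; }
};

class Policy {
  State _state;
 public:
  // Mirrors the new G1CollectorPolicy::collector_state() accessor.
  State* collector_state() { return &_state; }
};

class Heap {
  Policy _policy;
 public:
  // Same signatures as before the refactoring; callers are untouched.
  void set_marking_started()  { _policy.collector_state()->set_marking_started(); }
  void set_marking_complete() { _policy.collector_state()->set_marking_complete(); }
  bool mark_in_progress()     { return _policy.collector_state()->mark_in_progress(); }
};

int main() {
  Heap heap;
  heap.set_marking_started();
  assert(heap.mark_in_progress());
  heap.set_marking_complete();
  assert(!heap.mark_in_progress());
  return 0;
}
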
--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp 2015-01-27 17:28:15.478471952 -0500
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp 2015-01-27 17:28:15.370465707 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -284,8 +284,8 @@
  _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);
 
  // Now check if G1EvacuationFailureALot is enabled for the current GC type.
-  const bool gcs_are_young = g1_policy()->gcs_are_young();
-  const bool during_im = g1_policy()->during_initial_mark_pause();
+  const bool gcs_are_young = g1_policy()->collector_state()->gcs_are_young();
+  const bool during_im = g1_policy()->collector_state()->during_initial_mark_pause();
  const bool during_marking = mark_in_progress();
 
  _evacuation_failure_alot_for_current_gc &=
--- old/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp 2015-01-27 17:28:15.902496467 -0500
+++ new/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp 2015-01-27 17:28:15.790489991 -0500
@@ -112,22 +112,11 @@
 
  _pause_time_target_ms((double) MaxGCPauseMillis),
 
-  _gcs_are_young(true),
-
-  _during_marking(false),
-  _in_marking_window(false),
-  _in_marking_window_im(false),
-
  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),
 
  _recent_avg_pause_time_ratio(0.0),
 
-  _initiate_conc_mark_if_possible(false),
-  _during_initial_mark_pause(false),
-  _last_young_gc(false),
-  _last_gc_was_young(false),
-
  _eden_used_bytes_before_gc(0),
  _survivor_used_bytes_before_gc(0),
  _heap_used_bytes_before_gc(0),
@@ -567,7 +556,7 @@
 
  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
-    if (gcs_are_young()) {
+    if (collector_state()->gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
@@ -609,7 +598,7 @@
                                                  uint desired_min_length,
                                                  uint desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
-  assert(gcs_are_young(), "only call this for young GCs");
+  assert(collector_state()->gcs_are_young(), "only call this for young GCs");
 
  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
@@ -712,7 +701,7 @@
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
-    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
+    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, collector_state()->gcs_are_young());
  }
  return survivor_regions_evac_time;
 }
@@ -815,12 +804,12 @@
 
  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
-  set_gcs_are_young(true);
-  _last_young_gc = false;
-  clear_initiate_conc_mark_if_possible();
-  clear_during_initial_mark_pause();
-  _in_marking_window = false;
-  _in_marking_window_im = false;
+  collector_state()->set_gcs_are_young(true);
+  collector_state()->set_last_young_gc(false);
+  collector_state()->set_initiate_conc_mark_if_possible(false);
+  collector_state()->set_during_initial_mark_pause(false);
+  collector_state()->set_in_marking_window(false);
+  collector_state()->set_in_marking_window_im(false);
 
  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups
@@ -860,7 +849,7 @@
  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;
 
-  _last_gc_was_young = false;
+  collector_state()->set_last_gc_was_young(false);
 
  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
@@ -871,15 +860,15 @@
 
 void G1CollectorPolicy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
-  _during_marking = true;
-  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
-  clear_during_initial_mark_pause();
+  collector_state()->set_during_marking(true);
+  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
+  collector_state()->set_during_initial_mark_pause(false);
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 }
 
 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
-  _during_marking = false;
+  collector_state()->set_during_marking(false);
 }
 
 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
@@ -897,8 +886,8 @@
 }
 
 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
-  _last_young_gc = true;
-  _in_marking_window = false;
+  collector_state()->set_last_young_gc(true);
+  collector_state()->set_in_marking_window(false);
 }
 
 void G1CollectorPolicy::record_concurrent_pause() {
@@ -919,7 +908,7 @@
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
 
  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
-    if (gcs_are_young() && !_last_young_gc) {
+    if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
      ergo_verbose5(ErgoConcCycles,
        "request concurrent cycle initiation",
        ergo_format_reason("occupancy higher than threshold")
@@ -974,14 +963,14 @@
  }
 #endif // PRODUCT
 
-  last_pause_included_initial_mark = during_initial_mark_pause();
+  last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
  if (last_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
-    set_initiate_conc_mark_if_possible();
+    collector_state()->set_initiate_conc_mark_if_possible(true);
  }
 
  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
@@ -1043,37 +1032,37 @@
    }
  }
 
-  bool new_in_marking_window = _in_marking_window;
+  bool new_in_marking_window = collector_state()->in_marking_window();
  bool new_in_marking_window_im = false;
  if (last_pause_included_initial_mark) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }
 
-  if (_last_young_gc) {
+  if (collector_state()->last_young_gc()) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
    if (!last_pause_included_initial_mark) {
      if (next_gc_should_be_mixed("start mixed GCs",
                                  "do not start mixed GCs")) {
-        set_gcs_are_young(false);
+        collector_state()->set_gcs_are_young(false);
      }
    } else {
      ergo_verbose0(ErgoMixedGCs,
                    "do not start mixed GCs",
                    ergo_format_reason("concurrent cycle is about to start"));
    }
-    _last_young_gc = false;
+    collector_state()->set_last_young_gc(false);
  }
 
-  if (!_last_gc_was_young) {
+  if (!collector_state()->last_gc_was_young()) {
    // This is a mixed GC. Here we decide whether to continue doing
    // mixed GCs or not.
    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
-      set_gcs_are_young(true);
+      collector_state()->set_gcs_are_young(true);
    }
  }
 
@@ -1092,7 +1081,7 @@
  double cost_per_entry_ms = 0.0;
  if (cards_scanned > 10) {
    cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
-    if (_last_gc_was_young) {
+    if (collector_state()->last_gc_was_young()) {
      _cost_per_entry_ms_seq->add(cost_per_entry_ms);
    } else {
      _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
@@ -1102,7 +1091,7 @@
  if (_max_rs_lengths > 0) {
    double cards_per_entry_ratio =
      (double) cards_scanned / (double) _max_rs_lengths;
-    if (_last_gc_was_young) {
+    if (collector_state()->last_gc_was_young()) {
      _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
    } else {
      _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
@@ -1134,7 +1123,7 @@
  if (copied_bytes > 0) {
    cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
-    if (_in_marking_window) {
+    if (collector_state()->in_marking_window()) {
      _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
    } else {
      _cost_per_byte_ms_seq->add(cost_per_byte_ms);
@@ -1177,8 +1166,8 @@
    _rs_lengths_seq->add((double) _max_rs_lengths);
  }
 
-  _in_marking_window = new_in_marking_window;
-  _in_marking_window_im = new_in_marking_window_im;
+  collector_state()->set_in_marking_window(new_in_marking_window);
+  collector_state()->set_in_marking_window_im(new_in_marking_window_im);
  _free_regions_at_end_of_collection = _g1->num_free_regions();
  update_young_list_target_length();
@@ -1306,7 +1295,7 @@
 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  size_t rs_length = predict_rs_length_diff();
  size_t card_num;
-  if (gcs_are_young()) {
+  if (collector_state()->gcs_are_young()) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
@@ -1472,7 +1461,7 @@
                  ergo_format_reason("requested by GC cause")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
-    set_initiate_conc_mark_if_possible();
+    collector_state()->set_initiate_conc_mark_if_possible(true);
    return true;
  } else {
    ergo_verbose1(ErgoConcCycles,
@@ -1489,13 +1478,13 @@
  // We are about to decide on whether this pause will be an
  // initial-mark pause.
 
-  // First, during_initial_mark_pause() should not be already set. We
+  // First, collector_state()->during_initial_mark_pause() should not be already set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
-  assert(!during_initial_mark_pause(), "pre-condition");
+  assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
 
-  if (initiate_conc_mark_if_possible()) {
+  if (collector_state()->initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.
@@ -1506,10 +1495,10 @@
      // it has completed the last one. So we can go ahead and
      // initiate a new cycle.
 
-      set_during_initial_mark_pause();
+      collector_state()->set_during_initial_mark_pause(true);
      // We do not allow mixed GCs during marking.
-      if (!gcs_are_young()) {
-        set_gcs_are_young(true);
+      if (!collector_state()->gcs_are_young()) {
+        collector_state()->set_gcs_are_young(true);
        ergo_verbose0(ErgoMixedGCs,
                      "end mixed GCs",
                      ergo_format_reason("concurrent cycle is about to start"));
@@ -1517,7 +1506,7 @@
 
      // And we can now clear initiate_conc_mark_if_possible() as
      // we've already acted on it.
-      clear_initiate_conc_mark_if_possible();
+      collector_state()->set_initiate_conc_mark_if_possible(false);
 
      ergo_verbose0(ErgoConcCycles,
                    "initiate concurrent cycle",
@@ -1689,7 +1678,7 @@
  // retiring the current allocation region) or a concurrent
  // refine thread (RSet sampling).
 
-  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
+  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
  size_t used_bytes = hr->used();
  _inc_cset_recorded_rs_lengths += rs_length;
  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
@@ -1724,7 +1713,7 @@
  _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
 
  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
-  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
+  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
 
@@ -1919,9 +1908,9 @@
                ergo_format_ms("target pause time"),
                _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
 
-  _last_gc_was_young = gcs_are_young() ? true : false;
+  collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young() ? true : false);
 
-  if (_last_gc_was_young) {
+  if (collector_state()->last_gc_was_young()) {
    _trace_young_gen_time_data.increment_young_collection_count();
  } else {
    _trace_young_gen_time_data.increment_mixed_collection_count();
@@ -1972,7 +1961,7 @@
  // Set the start of the non-young choice time.
  double non_young_start_time_sec = young_end_time_sec;
 
-  if (!gcs_are_young()) {
+  if (!collector_state()->gcs_are_young()) {
    CollectionSetChooser* cset_chooser = _collectionSetChooser;
    cset_chooser->verify();
    const uint min_old_cset_length = calc_min_old_cset_length();
@@ -2018,7 +2007,7 @@
        break;
      }
 
-      double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
+      double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
      if (check_time_remaining) {
        if (predicted_time_ms > time_remaining_ms) {
          // Too expensive for the current CSet.
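
[Aside on the decide_on_conc_mark_initiation() hunks above: they implement a two-flag handshake. need_to_start_conc_mark() latches a request (initiate_conc_mark_if_possible) at the end of a pause, and a later pause converts that request into an initial-mark pause (during_initial_mark_pause) only when the concurrent cycle can actually start; otherwise the request stays pending. A minimal model of that protocol follows; it is a toy, with cycle_can_start standing in for the real "is the concurrent mark thread able to start a new cycle" test.]

#include <cstdio>

// Toy model of the request/acknowledge protocol between pauses and marking.
struct ConcMarkState {
  bool initiate_conc_mark_if_possible; // latched request, may span several pauses
  bool during_initial_mark_pause;      // true only for the duration of an initial-mark pause
};

// Called at the start of a pause; mirrors the shape of decide_on_conc_mark_initiation().
void decide(ConcMarkState* s, bool cycle_can_start) {
  if (s->initiate_conc_mark_if_possible) {
    if (cycle_can_start) {
      s->during_initial_mark_pause = true;       // act on the request...
      s->initiate_conc_mark_if_possible = false; // ...and clear it
    }
    // else: leave the request set and re-examine it at the next pause.
  }
}

int main() {
  ConcMarkState s = { true, false };
  decide(&s, false); // marking thread still busy: request stays pending
  printf("pending=%d initial-mark=%d\n", s.initiate_conc_mark_if_possible, s.during_initial_mark_pause);
  decide(&s, true);  // now it can start: this pause does the initial-mark work
  printf("pending=%d initial-mark=%d\n", s.initiate_conc_mark_if_possible, s.during_initial_mark_pause);
  return 0;
}
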
--- old/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp 2015-01-27 17:28:16.910554748 -0500
+++ new/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp 2015-01-27 17:28:16.802548504 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -159,6 +159,103 @@
  }
 };
 
+class G1CollectorState VALUE_OBJ_CLASS_SPEC {
+  // Various boolean state variables that indicate
+  // the phase of the G1 collection.
+  bool _in_young_gc_mode;
+  // indicates whether we are in full young or partially young GC mode
+  bool _gcs_are_young;
+  bool _last_gc_was_young;
+  bool _last_young_gc;
+
+  // If initiate_conc_mark_if_possible() is set at the beginning of a
+  // pause, it is a suggestion that the pause should start a marking
+  // cycle by doing the initial-mark work. However, it is possible
+  // that the concurrent marking thread is still finishing up the
+  // previous marking cycle (e.g., clearing the next marking
+  // bitmap). If that is the case we cannot start a new cycle and
+  // we'll have to wait for the concurrent marking thread to finish
+  // what it is doing. In this case we will postpone the marking cycle
+  // initiation decision for the next pause. When we eventually decide
+  // to start a cycle, we will set _during_initial_mark_pause which
+  // will stay true until the end of the initial-mark pause and it's
+  // the condition that indicates that a pause is doing the
+  // initial-mark work.
+  volatile bool _during_initial_mark_pause;
+
+  // At the end of a pause we check the heap occupancy and we decide
+  // whether we will start a marking cycle during the next pause. If
+  // we decide that we want to do that, we will set this parameter to
+  // true. So, this parameter will stay true between the end of a
+  // pause and the beginning of a subsequent pause (not necessarily
+  // the next one, see the comments on the next field) when we decide
+  // that we will indeed start a marking cycle and do the initial-mark
+  // work.
+  volatile bool _initiate_conc_mark_if_possible;
+
+  // NOTE: if some of these are synonyms for others,
+  // the redundant fields should be eliminated. XXX
+  bool _during_marking;
+  bool _mark_in_progress;
+  bool _in_marking_window;
+  bool _in_marking_window_im;
+
+ public:
+  G1CollectorState() :
+    _in_young_gc_mode(false),
+    _gcs_are_young(true),
+    _last_gc_was_young(false),
+    _last_young_gc(false),
+
+    _during_initial_mark_pause(false),
+    _initiate_conc_mark_if_possible(false),
+
+    _during_marking(false),
+    _mark_in_progress(false),
+    _in_marking_window(false),
+    _in_marking_window_im(false) {}
+
+  // Setters
+  void set_in_young_gc_mode(bool v) { _in_young_gc_mode = v; }
+  void set_gcs_are_young(bool v) { _gcs_are_young = v; }
+  void set_last_gc_was_young(bool v) { _last_gc_was_young = v; }
+  void set_last_young_gc(bool v) { _last_young_gc = v; }
+  void set_during_initial_mark_pause(bool v) { _during_initial_mark_pause = v; }
+  void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
+  void set_during_marking(bool v) { _during_marking = v; }
+  void set_mark_in_progress(bool v) { _mark_in_progress = v; }
+  void set_in_marking_window(bool v) { _in_marking_window = v; }
+  void set_in_marking_window_im(bool v) { _in_marking_window_im = v; }
+
+  // Puns
+  ////////
+  void set_marking_complete() { set_mark_in_progress(false); }
+  void set_marking_started() { set_mark_in_progress(true); }
+
+  // Getters
+  bool in_young_gc_mode() { return _in_young_gc_mode; }
+  bool gcs_are_young() { return _gcs_are_young; }
+  bool last_gc_was_young() { return _last_gc_was_young; }
+  bool last_young_gc() { return _last_young_gc; }
+  bool during_initial_mark_pause() { return _during_initial_mark_pause; }
+  bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
+  bool during_marking() { return _during_marking; }
+  bool mark_in_progress() { return _mark_in_progress; }
+  bool in_marking_window() { return _in_marking_window; }
+  bool in_marking_window_im() { return _in_marking_window_im; }
+
+  // Composite booleans (clients worry about flickering)
+  bool during_concurrent_mark() {
+    return (_in_marking_window && !_in_marking_window_im);
+  }
+
+  bool should_propagate() { // XXX should have a more suitable state name or abstraction for this
+    return (_last_young_gc && !_in_marking_window);
+  }
+};
+
 class G1CollectorPolicy: public CollectorPolicy {
 private:
  // either equal to the number of parallel threads, if ParallelGCThreads
@@ -194,7 +291,7 @@
 
  double _stop_world_start;
 
  // indicates whether we are in young or mixed GC mode
-  bool _gcs_are_young;
+  G1CollectorState _collector_state;
 
  uint _young_list_target_length;
  uint _young_list_fixed_length;
@@ -203,12 +300,6 @@
  // locker is active. This should be >= _young_list_target_length;
  uint _young_list_max_length;
 
-  bool _last_gc_was_young;
-
-  bool _during_marking;
-  bool _in_marking_window;
-  bool _in_marking_window_im;
-
  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups
@@ -218,10 +309,6 @@
  double _reserve_factor;
  uint _reserve_regions;
 
-  bool during_marking() {
-    return _during_marking;
-  }
-
  enum PredictionConstants {
    TruncatedSeqLength = 10
  };
@@ -363,7 +450,7 @@
  }
 
  double predict_rs_scan_time_ms(size_t card_num) {
-    if (gcs_are_young()) {
+    if (collector_state()->gcs_are_young()) {
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    } else {
      return predict_mixed_rs_scan_time_ms(card_num);
@@ -390,7 +477,7 @@
  }
 
  double predict_object_copy_time_ms(size_t bytes_to_copy) {
-    if (_in_marking_window && !_in_marking_window_im) {
+    if (collector_state()->during_concurrent_mark()) {
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    } else {
      return (double) bytes_to_copy *
@@ -428,7 +515,7 @@
  double predict_survivor_regions_evac_time();
 
  void cset_regions_freed() {
-    bool propagate = _last_gc_was_young && !_in_marking_window;
+    bool propagate = collector_state()->should_propagate();
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
@@ -552,33 +639,6 @@
    return _recent_avg_pause_time_ratio;
  }
 
-  // At the end of a pause we check the heap occupancy and we decide
-  // whether we will start a marking cycle during the next pause. If
-  // we decide that we want to do that, we will set this parameter to
-  // true. So, this parameter will stay true between the end of a
-  // pause and the beginning of a subsequent pause (not necessarily
-  // the next one, see the comments on the next field) when we decide
-  // that we will indeed start a marking cycle and do the initial-mark
-  // work.
-  volatile bool _initiate_conc_mark_if_possible;
-
-  // If initiate_conc_mark_if_possible() is set at the beginning of a
-  // pause, it is a suggestion that the pause should start a marking
-  // cycle by doing the initial-mark work. However, it is possible
-  // that the concurrent marking thread is still finishing up the
-  // previous marking cycle (e.g., clearing the next marking
-  // bitmap). If that is the case we cannot start a new cycle and
-  // we'll have to wait for the concurrent marking thread to finish
-  // what it is doing. In this case we will postpone the marking cycle
-  // initiation decision for the next pause. When we eventually decide
-  // to start a cycle, we will set _during_initial_mark_pause which
-  // will stay true until the end of the initial-mark pause and it's
-  // the condition that indicates that a pause is doing the
-  // initial-mark work.
-  volatile bool _during_initial_mark_pause;
-
-  bool _last_young_gc;
-
  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
@@ -647,6 +707,8 @@
    return CollectorPolicy::G1CollectorPolicyKind;
  }
 
+  G1CollectorState* collector_state() { return &_collector_state; }
+
  G1GCPhaseTimes* phase_times() const { return _phase_times; }
 
  // Check the current value of the young list RSet lengths and
@@ -784,14 +846,6 @@
  void print_collection_set(HeapRegion* list_head, outputStream* st);
 #endif // !PRODUCT
 
-  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible; }
-  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true; }
-  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
-
-  bool during_initial_mark_pause()      { return _during_initial_mark_pause; }
-  void set_during_initial_mark_pause()  { _during_initial_mark_pause = true; }
-  void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
-
  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
@@ -835,13 +889,6 @@
    return _young_list_max_length;
  }
 
-  bool gcs_are_young() {
-    return _gcs_are_young;
-  }
-  void set_gcs_are_young(bool gcs_are_young) {
-    _gcs_are_young = gcs_are_young;
-  }
-
  bool adaptive_young_list_length() {
    return _young_gen_sizer->adaptive_young_list_length();
  }
--- old/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp 2015-01-27 17:28:17.494588515 -0500
+++ new/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp 2015-01-27 17:28:17.386582270 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -191,7 +191,7 @@
  }
 
  bool doHeapRegion(HeapRegion *hr) {
-    bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
+    bool during_initial_mark = _g1h->g1_policy()->collector_state()->during_initial_mark_pause();
    bool during_conc_mark = _g1h->mark_in_progress();
 
    assert(!hr->is_humongous(), "sanity");
--- old/src/share/vm/gc_implementation/shared/vmGCOperations.cpp 2015-01-27 17:28:17.906612336 -0500
+++ new/src/share/vm/gc_implementation/shared/vmGCOperations.cpp 2015-01-27 17:28:17.798606092 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -205,7 +205,7 @@
 
  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
-    g1h->g1_policy()->set_initiate_conc_mark_if_possible();
+    g1h->g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);
 
    GCCauseSetter x(g1h, _gc_cause);
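
[Closing aside: besides moving the raw flags, the new G1CollectorState also introduces composite predicates (during_concurrent_mark(), should_propagate()) so call sites such as predict_object_copy_time_ms() and cset_regions_freed() stop re-deriving phase logic from raw booleans inline. A hedged sketch of that design point; the field names mirror the patch, but the surrounding class is a toy reduction, not the real G1CollectorState.]

#include <cstdio>

// Toy reduction of the composite-predicate idea from G1CollectorState.
class PhaseState {
  bool _last_young_gc;
  bool _in_marking_window;
  bool _in_marking_window_im;
 public:
  PhaseState() : _last_young_gc(false), _in_marking_window(false), _in_marking_window_im(false) {}
  void set_last_young_gc(bool v)        { _last_young_gc = v; }
  void set_in_marking_window(bool v)    { _in_marking_window = v; }
  void set_in_marking_window_im(bool v) { _in_marking_window_im = v; }

  // Call sites used to write `_in_marking_window && !_in_marking_window_im`
  // inline; the patch centralizes the expression here.
  bool during_concurrent_mark() { return _in_marking_window && !_in_marking_window_im; }

  // Likewise for `_last_young_gc && !_in_marking_window` in cset_regions_freed().
  bool should_propagate() { return _last_young_gc && !_in_marking_window; }
};

int main() {
  PhaseState s;
  s.set_in_marking_window(true);
  s.set_in_marking_window_im(false);
  printf("during_concurrent_mark: %d\n", s.during_concurrent_mark());
  printf("should_propagate: %d\n", s.should_propagate());
  return 0;
}
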