
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

*** 1,7 ****
  /*
! * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
--- 1,7 ----
  /*
! * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
*** 359,368 ****
--- 359,385 ----
    // The from card cache is not the memory that is actually committed. So we cannot
    // take advantage of the zero_filled parameter.
    reset_from_card_cache(start_idx, num_regions);
  }
+ ////////////////////// G1CollectedHeap methods ////////////////////////////////
+ 
+ // Records the fact that a marking phase is no longer in progress.
+ void G1CollectedHeap::set_marking_complete() {
+   g1_policy()->collector_state()->set_marking_complete();
+ }
+ 
+ // Records the fact that a marking phase has commenced.
+ void G1CollectedHeap::set_marking_started() {
+   g1_policy()->collector_state()->set_marking_started();
+ }
+ 
+ // Returns whether a marking phase is currently in progress.
+ bool G1CollectedHeap::mark_in_progress() {
+   return g1_policy()->collector_state()->mark_in_progress();
+ }
+ 
  void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) {
    // Claim the right to put the region on the dirty cards region list
    // by installing a self pointer.
    HeapRegion* next = hr->get_next_dirty_cards_region();
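Note on the hunk above: the three new accessors no longer read a field on G1CollectedHeap itself (the _mark_in_progress member is dropped from the constructor initializer list in the hunk at old lines 1768,1778 below); they forward to a shared state object reached through g1_policy()->collector_state(). The following is a minimal sketch of that forwarding pattern, assuming a simple flag holder. The CollectorStateSketch name and its single field are illustrative assumptions only, not the actual G1CollectorState, which also carries the gcs_are_young, during_initial_mark_pause, and initiate_conc_mark_if_possible flags seen in the later hunks.

  // Illustrative sketch only (assumed names; not the real G1CollectorState).
  // One object owns the collector phase flags; the heap forwards queries to it
  // instead of keeping scattered boolean members of its own.
  class CollectorStateSketch {
    bool _mark_in_progress;                         // mirrors the removed field
   public:
    CollectorStateSketch() : _mark_in_progress(false) {}
    void set_marking_started()    { _mark_in_progress = true;  }
    void set_marking_complete()   { _mark_in_progress = false; }
    bool mark_in_progress() const { return _mark_in_progress; }
  };

With this shape, the heap-side accessors reduce to one-line delegations, which is the pattern every remaining hunk in this change applies mechanically.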
*** 1074,1084 ****
      return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                  false /* bot_updates */);
    } else {
      HeapWord* result = humongous_obj_allocate(word_size, context);
      if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
!       g1_policy()->set_initiate_conc_mark_if_possible();
      }
      return result;
    }
  
    ShouldNotReachHere();
--- 1091,1101 ----
      return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                  false /* bot_updates */);
    } else {
      HeapWord* result = humongous_obj_allocate(word_size, context);
      if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
!       g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);
      }
      return result;
    }
  
    ShouldNotReachHere();
*** 1286,1296 ****
    abandon_collection_set(g1_policy()->inc_cset_head());
    g1_policy()->clear_incremental_cset();
    g1_policy()->stop_incremental_cset_building();
  
    tear_down_region_sets(false /* free_list_only */);
!   g1_policy()->set_gcs_are_young(true);
  
    // See the comments in g1CollectedHeap.hpp and
    // G1CollectedHeap::ref_processing_init() about
    // how reference processing currently works in G1.
--- 1303,1313 ----
    abandon_collection_set(g1_policy()->inc_cset_head());
    g1_policy()->clear_incremental_cset();
    g1_policy()->stop_incremental_cset_building();
  
    tear_down_region_sets(false /* free_list_only */);
!   g1_policy()->collector_state()->set_gcs_are_young(true);
  
    // See the comments in g1CollectedHeap.hpp and
    // G1CollectedHeap::ref_processing_init() about
    // how reference processing currently works in G1.
*** 1768,1778 ****
    _ref_processor_cm(NULL),
    _ref_processor_stw(NULL),
    _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
    _bot_shared(NULL),
    _evac_failure_scan_stack(NULL),
-   _mark_in_progress(false),
    _cg1r(NULL),
    _g1mm(NULL),
    _refine_cte_cl(NULL),
    _full_collection(false),
    _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
--- 1785,1794 ----
*** 2376,2387 ****
        }
      }
    }
  
  G1YCType G1CollectedHeap::yc_type() {
!   bool is_young = g1_policy()->gcs_are_young();
!   bool is_initial_mark = g1_policy()->during_initial_mark_pause();
    bool is_during_mark = mark_in_progress();
  
    if (is_initial_mark) {
      return InitialMark;
    } else if (is_during_mark) {
--- 2392,2403 ----
        }
      }
    }
  
  G1YCType G1CollectedHeap::yc_type() {
!   bool is_young = g1_policy()->collector_state()->gcs_are_young();
!   bool is_initial_mark = g1_policy()->collector_state()->during_initial_mark_pause();
    bool is_during_mark = mark_in_progress();
  
    if (is_initial_mark) {
      return InitialMark;
    } else if (is_during_mark) {
*** 3661,3672 ****
    }
    gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
  
    GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
!     .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
!     .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
  
    gclog_or_tty->print("[%s", (const char*)gc_cause_str);
  }
  
  void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
--- 3677,3688 ----
    }
    gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
  
    GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
!     .append(g1_policy()->collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
!     .append(g1_policy()->collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
  
    gclog_or_tty->print("[%s", (const char*)gc_cause_str);
  }
  
  void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
*** 3718,3743 ****
    // pause. If it is, during_initial_mark_pause() will return true
    // for the duration of this pause.
    g1_policy()->decide_on_conc_mark_initiation();
  
    // We do not allow initial-mark to be piggy-backed on a mixed GC.
!   assert(!g1_policy()->during_initial_mark_pause() ||
!          g1_policy()->gcs_are_young(), "sanity");
  
    // We also do not allow mixed GCs during marking.
!   assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
  
    // Record whether this pause is an initial mark. When the current
    // thread has completed its logging output and it's safe to signal
    // the CM thread, the flag's value in the policy has been reset.
!   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
  
    // Inner scope for scope based logging, timers, and stats collection
    {
      EvacuationInfo evacuation_info;
  
!     if (g1_policy()->during_initial_mark_pause()) {
        // We are about to start a marking cycle, so we increment the
        // full collection counter.
        increment_old_marking_cycles_started();
        register_concurrent_cycle_start(_gc_timer_stw->gc_start());
      }
--- 3734,3759 ----
    // pause. If it is, during_initial_mark_pause() will return true
    // for the duration of this pause.
    g1_policy()->decide_on_conc_mark_initiation();
  
    // We do not allow initial-mark to be piggy-backed on a mixed GC.
!   assert(!g1_policy()->collector_state()->during_initial_mark_pause() ||
!          g1_policy()->collector_state()->gcs_are_young(), "sanity");
  
    // We also do not allow mixed GCs during marking.
!   assert(!mark_in_progress() || g1_policy()->collector_state()->gcs_are_young(), "sanity");
  
    // Record whether this pause is an initial mark. When the current
    // thread has completed its logging output and it's safe to signal
    // the CM thread, the flag's value in the policy has been reset.
!   bool should_start_conc_mark = g1_policy()->collector_state()->during_initial_mark_pause();
  
    // Inner scope for scope based logging, timers, and stats collection
    {
      EvacuationInfo evacuation_info;
  
!     if (g1_policy()->collector_state()->during_initial_mark_pause()) {
        // We are about to start a marking cycle, so we increment the
        // full collection counter.
        increment_old_marking_cycles_started();
        register_concurrent_cycle_start(_gc_timer_stw->gc_start());
      }
*** 3840,3850 ****
  #if YOUNG_LIST_VERBOSE
        gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
        _young_list->print();
  #endif // YOUNG_LIST_VERBOSE
  
!       if (g1_policy()->during_initial_mark_pause()) {
          concurrent_mark()->checkpointRootsInitialPre();
        }
  
  #if YOUNG_LIST_VERBOSE
        gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
--- 3856,3866 ----
  #if YOUNG_LIST_VERBOSE
        gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
        _young_list->print();
  #endif // YOUNG_LIST_VERBOSE
  
!       if (g1_policy()->collector_state()->during_initial_mark_pause()) {
          concurrent_mark()->checkpointRootsInitialPre();
        }
  
  #if YOUNG_LIST_VERBOSE
        gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
*** 3944,3954 ****
          // The "used" of the the collection set have already been subtracted
          // when they were freed. Add in the bytes evacuated.
          _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
        }
  
!       if (g1_policy()->during_initial_mark_pause()) {
          // We have to do this before we notify the CM threads that
          // they can start working to make sure that all the
          // appropriate initialization is done on the CM object.
          concurrent_mark()->checkpointRootsInitialPost();
          set_marking_started();
--- 3960,3970 ----
          // The "used" of the the collection set have already been subtracted
          // when they were freed. Add in the bytes evacuated.
          _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
        }
  
!       if (g1_policy()->collector_state()->during_initial_mark_pause()) {
          // We have to do this before we notify the CM threads that
          // they can start working to make sure that all the
          // appropriate initialization is done on the CM object.
          concurrent_mark()->checkpointRootsInitialPost();
          set_marking_started();
*** 4493,4503 ****
      G1ParScanThreadState pss(_g1h, worker_id, rp);
      G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
  
      pss.set_evac_failure_closure(&evac_failure_cl);
  
!     bool only_young = _g1h->g1_policy()->gcs_are_young();
  
      // Non-IM young GC.
      G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, &pss, rp);
      G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
                                                only_young, // Only process dirty klasses.
--- 4509,4519 ----
      G1ParScanThreadState pss(_g1h, worker_id, rp);
      G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
  
      pss.set_evac_failure_closure(&evac_failure_cl);
  
!     bool only_young = _g1h->g1_policy()->collector_state()->gcs_are_young();
  
      // Non-IM young GC.
      G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, &pss, rp);
      G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
                                                only_young, // Only process dirty klasses.
*** 4522,4532 ****
      OopClosure* weak_root_cl;
      CLDClosure* strong_cld_cl;
      CLDClosure* weak_cld_cl;
      CodeBlobClosure* strong_code_cl;
  
!     if (_g1h->g1_policy()->during_initial_mark_pause()) {
        // We also need to mark copied objects.
        strong_root_cl = &scan_mark_root_cl;
        strong_cld_cl  = &scan_mark_cld_cl;
        strong_code_cl = &scan_mark_code_cl;
        if (ClassUnloadingWithConcurrentMark) {
--- 4538,4548 ----
      OopClosure* weak_root_cl;
      CLDClosure* strong_cld_cl;
      CLDClosure* weak_cld_cl;
      CodeBlobClosure* strong_code_cl;
  
!     if (_g1h->g1_policy()->collector_state()->during_initial_mark_pause()) {
        // We also need to mark copied objects.
        strong_root_cl = &scan_mark_root_cl;
        strong_cld_cl  = &scan_mark_cld_cl;
        strong_code_cl = &scan_mark_code_cl;
        if (ClassUnloadingWithConcurrentMark) {
*** 4603,4613 ****
    // First scan the shared roots.
    double ext_roots_start = os::elapsedTime();
    double closure_app_time_sec = 0.0;
  
!   bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
    bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
  
    BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
    BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
--- 4619,4629 ----
    // First scan the shared roots.
    double ext_roots_start = os::elapsedTime();
    double closure_app_time_sec = 0.0;
  
!   bool during_im = _g1h->g1_policy()->collector_state()->during_initial_mark_pause();
    bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
  
    BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
    BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
*** 5293,5303 ****
      G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
  
      OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
  
!     if (_g1h->g1_policy()->during_initial_mark_pause()) {
        // We also need to mark copied objects.
        copy_non_heap_cl = &copy_mark_non_heap_cl;
      }
  
      // Keep alive closure.
--- 5309,5319 ----
      G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
  
      OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
  
!     if (_g1h->g1_policy()->collector_state()->during_initial_mark_pause()) {
        // We also need to mark copied objects.
        copy_non_heap_cl = &copy_mark_non_heap_cl;
      }
  
      // Keep alive closure.
*** 5398,5408 ****
      G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
  
      OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
  
!     if (_g1h->g1_policy()->during_initial_mark_pause()) {
        // We also need to mark copied objects.
        copy_non_heap_cl = &copy_mark_non_heap_cl;
      }
  
      // Is alive closure
--- 5414,5424 ----
      G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
  
      OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
  
!     if (_g1h->g1_policy()->collector_state()->during_initial_mark_pause()) {
        // We also need to mark copied objects.
        copy_non_heap_cl = &copy_mark_non_heap_cl;
      }
  
      // Is alive closure
*** 5513,5523 ****
      G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
  
      OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
  
!     if (_g1h->g1_policy()->during_initial_mark_pause()) {
        // We also need to mark copied objects.
        copy_non_heap_cl = &copy_mark_non_heap_cl;
      }
  
      // Keep alive closure.
--- 5529,5539 ----
      G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
  
      OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
  
!     if (_g1h->g1_policy()->collector_state()->during_initial_mark_pause()) {
        // We also need to mark copied objects.
        copy_non_heap_cl = &copy_mark_non_heap_cl;
      }
  
      // Keep alive closure.
*** 5631,5641 ****
    double end_par_time_sec;
  
    {
      StrongRootsScope srs(this);
      // InitialMark needs claim bits to keep track of the marked-through CLDs.
!     if (g1_policy()->during_initial_mark_pause()) {
        ClassLoaderDataGraph::clear_claimed_marks();
      }
  
      // The individual threads will set their evac-failure closures.
      if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
--- 5647,5657 ----
    double end_par_time_sec;
  
    {
      StrongRootsScope srs(this);
      // InitialMark needs claim bits to keep track of the marked-through CLDs.
!     if (g1_policy()->collector_state()->during_initial_mark_pause()) {
        ClassLoaderDataGraph::clear_claimed_marks();
      }
  
      // The individual threads will set their evac-failure closures.
      if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
*** 6584,6605 ****
      } else {
        new_alloc_region->set_old();
        _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
        check_bitmaps("Old Region Allocation", new_alloc_region);
      }
!     bool during_im = g1_policy()->during_initial_mark_pause();
      new_alloc_region->note_start_of_copying(during_im);
      return new_alloc_region;
    }
  }
  return NULL;
  }
  
  void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                               size_t allocated_bytes,
                                               InCSetState dest) {
!   bool during_im = g1_policy()->during_initial_mark_pause();
    alloc_region->note_end_of_copying(during_im);
    g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
    if (dest.is_young()) {
      young_list()->add_survivor_region(alloc_region);
    } else {
--- 6600,6621 ----
      } else {
        new_alloc_region->set_old();
        _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
        check_bitmaps("Old Region Allocation", new_alloc_region);
      }
!     bool during_im = g1_policy()->collector_state()->during_initial_mark_pause();
      new_alloc_region->note_start_of_copying(during_im);
      return new_alloc_region;
    }
  }
  return NULL;
  }
  
  void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                               size_t allocated_bytes,
                                               InCSetState dest) {
!   bool during_im = g1_policy()->collector_state()->during_initial_mark_pause();
    alloc_region->note_end_of_copying(during_im);
    g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
    if (dest.is_young()) {
      young_list()->add_survivor_region(alloc_region);
    } else {