Print this page
8236073: G1: Use SoftMaxHeapSize to guide GC heuristics

*** 28,37 **** --- 28,38 ---- #include "gc/g1/g1BarrierSet.hpp" #include "gc/g1/g1BiasedArray.hpp" #include "gc/g1/g1CardTable.hpp" #include "gc/g1/g1CollectionSet.hpp" #include "gc/g1/g1CollectorState.hpp" + #include "gc/g1/g1ConcurrentHeapResize.hpp" #include "gc/g1/g1ConcurrentMark.hpp" #include "gc/g1/g1EdenRegions.hpp" #include "gc/g1/g1EvacFailure.hpp" #include "gc/g1/g1EvacStats.hpp" #include "gc/g1/g1EvacuationInfo.hpp"
*** 78,87 **** --- 79,90 ---- class G1CollectionSet; class G1Policy; class G1HotCardCache; class G1RemSet; class G1YoungRemSetSamplingThread; + class G1ConcurrentHeapResize; + class G1ConcurrentHeapResizeThread; class G1ConcurrentMark; class G1ConcurrentMarkThread; class G1ConcurrentRefine; class GenerationCounters; class STWGCTimer;
*** 233,242 **** --- 236,249 ---- // that subsequent expansion attempts will also fail if one fails). // Currently, it is only consulted during GC and it's reset at the // start of each GC. bool _expand_heap_after_alloc_failure; + // The previous value of SoftMaxHeapSize, used to detect whether a new value has been assigned + // The default value is MaxHeapSize + size_t _prev_soft_max_heap_size; + // Helper for monitoring and management support. G1MonitoringSupport* _g1mm; // Records whether the region at the given index is (still) a // candidate for eager reclaim. Only valid for humongous start
*** 509,523 **** // Internal helpers used during full GC to split it up to // increase readability. void abort_concurrent_cycle(); void verify_before_full_collection(bool explicit_gc); void prepare_heap_for_full_collection(); ! void prepare_heap_for_mutators(); void abort_refinement(); void verify_after_full_collection(); void print_heap_after_full_collection(G1HeapTransition* heap_transition); // Helper method for satisfy_failed_allocation() HeapWord* satisfy_failed_allocation_helper(size_t word_size, bool do_gc, bool clear_all_soft_refs, bool expect_null_mutator_alloc_region, --- 516,533 ---- // Internal helpers used during full GC to split it up to // increase readability. void abort_concurrent_cycle(); void verify_before_full_collection(bool explicit_gc); void prepare_heap_for_full_collection(); ! void prepare_heap_for_mutators(bool explicit_gc); void abort_refinement(); void verify_after_full_collection(); void print_heap_after_full_collection(G1HeapTransition* heap_transition); + // Check if SoftMaxHeapSize has changed at runtime + bool check_soft_max_heap_size_changed(); + // Helper method for satisfy_failed_allocation() HeapWord* satisfy_failed_allocation_helper(size_t word_size, bool do_gc, bool clear_all_soft_refs, bool expect_null_mutator_alloc_region,
*** 563,573 **** G1MonitoringSupport* g1mm() { assert(_g1mm != NULL, "should have been initialized"); return _g1mm; } ! void resize_heap_if_necessary(); G1NUMA* numa() const { return _numa; } // Expand the garbage-first heap by at least the given size (in bytes!). // Returns true if the heap was expanded by the requested amount; --- 573,583 ---- G1MonitoringSupport* g1mm() { assert(_g1mm != NULL, "should have been initialized"); return _g1mm; } ! void resize_heap_if_necessary(bool limit_within_soft_max_heap_size); G1NUMA* numa() const { return _numa; } // Expand the garbage-first heap by at least the given size (in bytes!). // Returns true if the heap was expanded by the requested amount;
*** 779,789 **** --- 789,802 ---- void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss); void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1RedirtyCardsQueueSet* rdcqs, G1ParScanThreadStateSet* pss); + void adjust_heap_after_young_collection(); void expand_heap_after_young_collection(); + void shrink_heap_after_young_collection(); + // Update object copying statistics. void record_obj_copy_mem_stats(); // The hot card cache for remembered set insertion optimization. G1HotCardCache* _hot_card_cache;
*** 804,813 **** --- 817,830 ---- G1ConcurrentMarkThread* _cm_thread; // The concurrent refiner. G1ConcurrentRefine* _cr; + // The concurrent heap resizer (and the thread it runs in). + G1ConcurrentHeapResize* _concurrent_heap_resize; + G1ConcurrentHeapResizeThread* _concurrent_heap_resize_thread; + // The parallel task queues RefToScanQueueSet *_task_queues; // True iff an evacuation has failed in the current collection. bool _evacuation_failed;
*** 1076,1086 **** inline void old_set_add(HeapRegion* hr); inline void old_set_remove(HeapRegion* hr); inline void archive_set_add(HeapRegion* hr); ! size_t non_young_capacity_bytes() { return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes; } // Determine whether the given region is one that we are using as an // old GC alloc region. --- 1093,1103 ---- inline void old_set_add(HeapRegion* hr); inline void old_set_remove(HeapRegion* hr); inline void archive_set_add(HeapRegion* hr); ! size_t non_young_capacity_bytes() const { return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes; } // Determine whether the given region is one that we are using as an // old GC alloc region.
*** 1356,1365 **** --- 1373,1385 ---- // Refinement G1ConcurrentRefine* concurrent_refine() const { return _cr; } + // Concurrent heap resizing + G1ConcurrentHeapResize* concurrent_heap_resize() const { return _concurrent_heap_resize; } + // Optimized nmethod scanning support routines // Register the given nmethod with the G1 heap. virtual void register_nmethod(nmethod* nm);