8236073: G1: Use SoftMaxHeapSize to guide GC heuristics

*** 80,89 ****
--- 80,99 ----
    CHeapBitMap _available_map;

    // The number of regions committed in the heap.
    uint _num_committed;

+   // Each bit in this bitmap indicates that the corresponding region is in concurrent
+   // resizing.
+   CHeapBitMap _concurrent_resizing_map;
+
+ #ifdef ASSERT
+   // The number of regions for concurrent resizing which is treated as *committed* but
+   // not counted in length().
+   uint _num_concurrent_resizing;
+ #endif
+
    // Internal only. The highest heap region +1 we allocated a HeapRegion instance for.
    uint _allocated_heapregions_length;

    HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
    HeapWord* heap_end() const {return _regions.end_address_mapped(); }
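The new _concurrent_resizing_map marks regions whose backing memory is being uncommitted outside of a pause: such a region is no longer available for allocation and, per the _num_concurrent_resizing comment, is treated as committed but not counted in length(). Below is a minimal stand-alone model of that bookkeeping, using std::vector<bool> in place of CHeapBitMap; the class, its names, and the exact transitions are assumptions for illustration, not code from the patch.

#include <cassert>
#include <cstdint>
#include <vector>

// Illustration only: a simplified model of the per-region states the patch tracks.
class RegionStateModel {
  std::vector<bool> _available;            // stand-in for _available_map
  std::vector<bool> _concurrent_resizing;  // stand-in for _concurrent_resizing_map
  uint32_t _num_committed = 0;             // stand-in for _num_committed

public:
  explicit RegionStateModel(uint32_t max_regions)
    : _available(max_regions, false), _concurrent_resizing(max_regions, false) {}

  // Region becomes committed and available.
  void commit(uint32_t i) {
    _available[i] = true;
    _num_committed++;
  }

  // Assumed transition: a region selected for concurrent uncommit stops being
  // available and stops counting towards length(), but its memory is still mapped.
  void start_concurrent_resizing(uint32_t i) {
    assert(_available[i]);
    _available[i] = false;
    _concurrent_resizing[i] = true;
    _num_committed--;
  }

  // Assumed transition: once the memory has actually been uncommitted, the bit is cleared.
  void finish_concurrent_resizing(uint32_t i) {
    assert(_concurrent_resizing[i]);
    _concurrent_resizing[i] = false;
  }

  uint32_t length() const { return _num_committed; }  // committed, excluding resizing regions
};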
*** 115,129 ****
--- 125,151 ----
    G1HeapRegionTable _regions;

    G1RegionToSpaceMapper* _heap_mapper;
    G1RegionToSpaceMapper* _prev_bitmap_mapper;
    G1RegionToSpaceMapper* _next_bitmap_mapper;

    FreeRegionList _free_list;
+   FreeRegionList _concurrent_uncommitting_list;

    void make_regions_available(uint index, uint num_regions = 1, WorkGang* pretouch_gang = NULL);
    void uncommit_regions(uint index, size_t num_regions = 1);

    // Allocate a new HeapRegion for the given index.
    HeapRegion* new_heap_region(uint hrm_index);
+
+   // Set/clear the concurrent resizing state of the given region.
+   void set_region_for_concurrent_resizing(uint index);
+   void clear_region_for_concurrent_resizing(uint index);
+
+   // Uncommit region memory on the concurrent thread.
+   void concurrent_uncommit_regions_memory(uint start, size_t num_regions);
+
+   // Uncommit region memory in the VM thread during synchronization.
+   void synchronize_uncommit_regions_memory(uint start, size_t num_regions);
+
  #ifdef ASSERT
  public:
    bool is_free(HeapRegion* hr) const;
  #endif
  public:
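These private helpers split the uncommit work into a concurrent path and a safepoint path, with the set/clear pair flipping the per-region resizing bit. The sketch below shows one plausible shape for these helpers; only the method names come from the declarations above, while the bodies, the bit-vector stand-in, and os_uncommit() are assumptions.

#include <cstddef>
#include <cstdint>
#include <vector>

using uint = uint32_t;

// Illustration only: self-contained stand-ins for the four helpers declared above.
struct UncommitHelpersSketch {
  std::vector<bool> _concurrent_resizing_map;  // stand-in for the real CHeapBitMap

  explicit UncommitHelpersSketch(uint max_regions)
    : _concurrent_resizing_map(max_regions, false) {}

  // Stand-in for the real os::uncommit_memory() call on a region's address range.
  static void os_uncommit(uint /*region_index*/) {}

  void set_region_for_concurrent_resizing(uint index)   { _concurrent_resizing_map[index] = true; }
  void clear_region_for_concurrent_resizing(uint index) { _concurrent_resizing_map[index] = false; }

  // Assumed body for the concurrent path: a background thread releases the memory
  // region by region and drops the resizing marker as it goes.
  void concurrent_uncommit_regions_memory(uint start, size_t num_regions) {
    for (size_t i = 0; i < num_regions; i++) {
      uint region = start + static_cast<uint>(i);
      os_uncommit(region);
      clear_region_for_concurrent_resizing(region);
    }
  }

  // Assumed body for the synchronized path: the VM thread does the same work at a
  // safepoint when a pause cannot wait for the background thread to finish.
  void synchronize_uncommit_regions_memory(uint start, size_t num_regions) {
    for (size_t i = 0; i < num_regions; i++) {
      uint region = start + static_cast<uint>(i);
      os_uncommit(region);
      clear_region_for_concurrent_resizing(region);
    }
  }
};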
*** 159,168 ****
--- 181,196 ----
    inline HeapRegion* at_or_null(uint index) const;

    // Returns whether the given region is available for allocation.
    bool is_available(uint region) const;

+   // Returns whether the given region is in concurrent resizing.
+   bool is_in_concurrent_resizing(uint region) const;
+
+   // Returns true if the given region is neither available nor in concurrent resizing.
+   bool is_unavailable_for_allocation(uint region) const;
+
    // Return the next region (by index) that is part of the same
    // humongous object that hr is part of.
    inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;

    // If addr is within the committed space return its corresponding
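The comment on is_unavailable_for_allocation() pins down how the two region states combine: a region is reported as unavailable only when it is neither available nor in concurrent resizing, so a region whose uncommit is still in flight is not lumped in with plain uncommitted regions. Expressed as a predicate over hypothetical free-function stubs (the real queries are HeapRegionManager members backed by the two bitmaps):

#include <cstdint>

using uint = uint32_t;

// Stubs standing in for the bitmap-backed queries declared above.
bool is_available(uint region);              // bit set in _available_map
bool is_in_concurrent_resizing(uint region); // bit set in _concurrent_resizing_map

// Per the comment: "unavailable for allocation" means neither state holds.
bool is_unavailable_for_allocation(uint region) {
  return !is_available(region) && !is_in_concurrent_resizing(region);
}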
*** 201,210 ****
--- 229,243 ----
    size_t total_free_bytes() const {
      return num_free_regions() * HeapRegion::GrainBytes;
    }

+   // Return the number of regions that are currently being resized concurrently.
+   uint num_of_concurrent_resizing_regions() const {
+     return _concurrent_uncommitting_list.length();
+   }
+
    // Return the number of available (uncommitted) regions.
    uint available() const { return max_length() - length(); }

    // Return the number of regions that have been committed in the heap.
    uint length() const { return _num_committed; }
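num_of_concurrent_resizing_regions() exposes how many regions are already queued on _concurrent_uncommitting_list. Given the SoftMaxHeapSize goal in the bug title, one plausible consumer is a sizing heuristic that counts in-flight uncommits towards the current footprint but does not schedule them a second time. The following is a hypothetical sketch of such a heuristic, not code from the patch.

#include <cstdint>

using uint = uint32_t;

// Stubs standing in for the HeapRegionManager accessors used below.
uint length();                              // committed regions, excluding in-flight resizing
uint num_of_concurrent_resizing_regions();  // regions already queued for concurrent uncommit

// Hypothetical check: is the heap, counting regions whose uncommit is still in flight
// (their memory is still committed), above the soft goal?
bool above_soft_goal(uint soft_target_regions) {
  uint footprint = length() + num_of_concurrent_resizing_regions();
  return footprint > soft_target_regions;
}

// Hypothetical sizing step: queue only regions that are not already being uncommitted.
uint regions_to_queue_for_uncommit(uint soft_target_regions) {
  uint committed = length();
  return committed > soft_target_regions ? committed - soft_target_regions : 0;
}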
*** 268,277 ****
--- 301,319 ----
    virtual void verify();

    // Do some sanity checking.
    void verify_optional() PRODUCT_RETURN;
+
+   // Uncommit region memory on the concurrent thread.
+   void concurrent_uncommit_regions();
+
+   // Prepare the regions to be uncommitted concurrently.
+   void prepare_concurrent_uncommit_regions(uint num_regions_to_remove);
+
+   // Synchronize the regions which are resized concurrently, at a safepoint.
+   void synchronize_concurrent_resizing_regions();
  };

  // The HeapRegionClaimer is used during parallel iteration over heap regions,
  // allowing workers to claim heap regions, gaining exclusive rights to these regions.
  class HeapRegionClaimer : public StackObj {
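Taken together, the three new public methods suggest a three-step lifecycle: prepare a batch of regions (presumably moving them from the free list to _concurrent_uncommitting_list), uncommit their memory on a background thread, and synchronize the bookkeeping at a safepoint. Below is a hedged sketch of a driver for that ordering; only the three method names come from the declarations, while the caller structure and step descriptions are assumptions.

#include <cstdint>

using uint = uint32_t;

// Stubs for the three entry points declared on HeapRegionManager above.
void prepare_concurrent_uncommit_regions(uint num_regions_to_remove);
void concurrent_uncommit_regions();
void synchronize_concurrent_resizing_regions();

// Hypothetical driver showing the assumed ordering of the three steps.
void shrink_heap_concurrently(uint num_regions_to_remove) {
  // 1. At a pause (or under the heap lock): pick the regions to remove and mark them
  //    as being in concurrent resizing.
  prepare_concurrent_uncommit_regions(num_regions_to_remove);

  // 2. On a background/service thread: release the memory outside the pause.
  concurrent_uncommit_regions();

  // 3. At the next safepoint: fold the concurrently resized regions back into the
  //    manager's committed/available accounting.
  synchronize_concurrent_resizing_regions();
}

Splitting the actual memory release away from the pause keeps uncommit latency out of GC pause times, which is the usual motivation for doing this work on a concurrent thread.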