--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	2015-03-05 15:35:37.256426193 +0100
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	2015-03-05 15:35:37.189424201 +0100
@@ -34,6 +34,7 @@
 #include "gc_implementation/g1/g1HRPrinter.hpp"
 #include "gc_implementation/g1/g1InCSetState.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
+#include "gc_implementation/g1/g1EvacStats.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegionManager.hpp"
@@ -55,6 +56,7 @@
 class HRRSCleanupTask;
 class GenerationSpec;
 class OopsInHeapRegionClosure;
+class G1ParScanThreadState;
 class G1KlassScanClosure;
 class ObjectClosure;
 class SpaceClosure;
@@ -181,15 +183,10 @@
   friend class VM_G1CollectFull;
   friend class VM_G1IncCollectionPause;
   friend class VMStructs;
-  friend class MutatorAllocRegion;
-  friend class SurvivorGCAllocRegion;
-  friend class OldGCAllocRegion;
-  friend class G1Allocator;
 
   // Closures used in implementation.
   friend class G1ParScanThreadState;
   friend class G1ParTask;
-  friend class G1ParGCAllocator;
   friend class G1PrepareCompactClosure;
 
   // Other related classes.
@@ -246,18 +243,17 @@
   // The sequence of all heap regions in the heap.
   HeapRegionManager _hrm;
 
-  // Class that handles the different kinds of allocations.
+  // Manages all kinds of allocations within regions. This excludes only
+  // humongous object allocations.
   G1Allocator* _allocator;
 
+  // Outside of GC pauses, the number of bytes used in all regions other
+  // than the current allocation region(s).
+  size_t _summary_bytes_used;
+
   // Statistics for each allocation context
   AllocationContextStats _allocation_context_stats;
 
-  // PLAB sizing policy for survivors.
-  PLABStats _survivor_plab_stats;
-
-  // PLAB sizing policy for tenured objects.
-  PLABStats _old_plab_stats;
-
   // It specifies whether we should attempt to expand the heap after a
   // region allocation failure. If heap expansion fails we set this to
   // false so that we don't re-attempt the heap expansion (it's likely
@@ -266,22 +262,6 @@
   // start of each GC.
   bool _expand_heap_after_alloc_failure;
 
-  // It resets the mutator alloc region before new allocations can take place.
-  void init_mutator_alloc_region();
-
-  // It releases the mutator alloc region.
-  void release_mutator_alloc_region();
-
-  // It initializes the GC alloc regions at the start of a GC.
-  void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
-
-  // It releases the GC alloc regions at the end of a GC.
-  void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
-
-  // It does any cleanup that needs to be done on the GC alloc regions
-  // before a Full GC.
-  void abandon_gc_alloc_regions();
-
   // Helper for monitoring and management support.
   G1MonitoringSupport* _g1mm;
 
@@ -522,31 +502,13 @@
                                              AllocationContext_t context,
                                              bool expect_null_mutator_alloc_region);
 
+ public:
   // It dirties the cards that cover the block so that so that the post
   // write barrier never queues anything when updating objects on this
   // block. It is assumed (and in fact we assert) that the block
   // belongs to a young region.
   inline void dirty_young_block(HeapWord* start, size_t word_size);
 
-  // Allocate blocks during garbage collection. Will ensure an
-  // allocation region, either by picking one or expanding the
-  // heap, and then allocate a block of the given size. The block
-  // may not be a humongous - it must fit into a single heap region.
-  inline HeapWord* par_allocate_during_gc(InCSetState dest,
-                                          size_t word_size,
-                                          AllocationContext_t context);
-  // Ensure that no further allocations can happen in "r", bearing in mind
-  // that parallel threads might be attempting allocations.
-  void par_allocate_remaining_space(HeapRegion* r);
-
-  // Allocation attempt during GC for a survivor object / PLAB.
-  inline HeapWord* survivor_attempt_allocation(size_t word_size,
-                                               AllocationContext_t context);
-
-  // Allocation attempt during GC for an old object / PLAB.
-  inline HeapWord* old_attempt_allocation(size_t word_size,
-                                          AllocationContext_t context);
-
   // These methods are the "callbacks" from the G1AllocRegion class.
 
   // For mutator alloc regions.
@@ -559,7 +521,7 @@
                                   InCSetState dest);
   void retire_gc_alloc_region(HeapRegion* alloc_region,
                               size_t allocated_bytes, InCSetState dest);
-
+ private:
   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   // inspection request and should collect the entire heap
   // - if clear_all_soft_refs is true, all soft references should be
@@ -596,7 +558,7 @@
 
   // Process any reference objects discovered during
   // an incremental evacuation pause.
-  void process_discovered_references(uint no_of_gc_workers);
+  void process_discovered_references(G1ParScanThreadState** pss, uint no_of_gc_workers);
 
   // Enqueue any remaining discovered references
   // after processing.
@@ -619,12 +581,6 @@
   // (Rounds up to a HeapRegion boundary.)
   bool expand(size_t expand_bytes);
 
-  // Returns the PLAB statistics for a given destination.
-  inline PLABStats* alloc_buffer_stats(InCSetState dest);
-
-  // Determines PLAB size for a given destination.
-  inline size_t desired_plab_sz(InCSetState dest);
-
   inline AllocationContextStats& allocation_context_stats();
 
   // Do anything common to GC's.
@@ -698,6 +654,9 @@
 
   G1HRPrinter* hr_printer() { return &_hr_printer; }
 
+  // Allocates a new heap region instance.
+  virtual HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
+
   // Frees a non-humongous region by initializing its contents and
   // adding it to the free list that's passed as a parameter (this is
   // usually a local list which will be appended to the master free
@@ -756,7 +715,22 @@
 
   // Actually do the work of evacuating the collection set.
   void evacuate_collection_set(EvacuationInfo& evacuation_info);
-
+
+  // Print the header for the per-thread termination statistics.
+  static void print_termination_stats_hdr(outputStream* const st);
+  // Print actual per-thread termination statistics.
+  void print_termination_stats(outputStream* const st,
+                               uint worker_id,
+                               double elapsed_ms,
+                               double strong_roots_ms,
+                               double term_ms,
+                               size_t term_attempts,
+                               size_t alloc_buffer_waste,
+                               size_t undo_waste) const;
+  // Update object copying statistics.
+  void record_obj_copy_mem_stats();
+  void record_obj_copy_mem_stats(InCSetState which);
+
   // The g1 remembered set of the heap.
   G1RemSet* _g1_rem_set;
 
@@ -1100,6 +1074,16 @@
   size_t used_unlocked() const;
   size_t recalculate_used() const;
 
+  void increase_used(size_t bytes) { _summary_bytes_used += bytes; }
+  void set_used(size_t bytes) { _summary_bytes_used = bytes; }
+
+  void decrease_used(size_t bytes) {
+    assert(_summary_bytes_used >= bytes,
+           err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
+                   _summary_bytes_used, bytes));
+    _summary_bytes_used -= bytes;
+  }
+
   // These virtual functions do the actual allocation.
   // Some heaps may offer a contiguous region for shared non-blocking
   // allocation, via inlined code (by exporting the address of the top and
@@ -1434,6 +1418,8 @@
     return word_size > _humongous_object_threshold_in_words;
   }
 
+  static size_t humongous_object_threshold_in_words() { return _humongous_object_threshold_in_words; }
+
   // Update mod union table with the set of dirty cards.
   void updateModUnion();
 
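
Side note, not part of the patch: the new static accessor humongous_object_threshold_in_words() exposes the same threshold that the existing is_humongous() check compares against (word_size > threshold). A minimal, self-contained sketch of how a caller outside G1CollectedHeap could use such an accessor to keep a buffer size below the humongous limit; the stand-in threshold value, clamp_to_non_humongous() and main() are illustrative assumptions only, not taken from the patch.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Illustrative stand-in for G1CollectedHeap::humongous_object_threshold_in_words().
    // The real threshold is derived from the heap region size; 64K words is only an example.
    static size_t humongous_object_threshold_in_words() { return 64 * 1024; }

    // Mirrors the is_humongous() check visible in the hunk at old line 1434:
    // an allocation is humongous when word_size is strictly greater than the threshold.
    static bool is_humongous(size_t word_size) {
      return word_size > humongous_object_threshold_in_words();
    }

    // Hypothetical helper: clamp a desired buffer size so it never becomes humongous.
    static size_t clamp_to_non_humongous(size_t desired_word_size) {
      return std::min(desired_word_size, humongous_object_threshold_in_words());
    }

    int main() {
      size_t desired = 128 * 1024;
      size_t clamped = clamp_to_non_humongous(desired);
      std::printf("desired=%zu clamped=%zu humongous=%d\n",
                  desired, clamped, is_humongous(clamped));
      return 0;
    }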
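Side note, not part of the patch: increase_used(), set_used() and decrease_used() maintain _summary_bytes_used, which per the new field comment counts, outside of GC pauses, the bytes used in all regions other than the current allocation region(s). The sketch below reproduces the same guarded accounting pattern in standard C++ so it compiles on its own; the UsedBytesAccounting class and the retire/free call sites in main() are hypothetical, and plain assert stands in for HotSpot's assert/err_msg used in the patch.

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Hypothetical stand-in for the _summary_bytes_used bookkeeping added by the patch.
    class UsedBytesAccounting {
      size_t _summary_bytes_used;
     public:
      UsedBytesAccounting() : _summary_bytes_used(0) {}

      void increase_used(size_t bytes) { _summary_bytes_used += bytes; }
      void set_used(size_t bytes)      { _summary_bytes_used = bytes; }

      void decrease_used(size_t bytes) {
        // Same invariant the patch asserts: never subtract more than is
        // currently recorded as used.
        assert(_summary_bytes_used >= bytes && "invariant: _summary_bytes_used >= bytes");
        _summary_bytes_used -= bytes;
      }

      size_t used() const { return _summary_bytes_used; }
    };

    int main() {
      UsedBytesAccounting used;
      used.increase_used(8 * 1024 * 1024);  // e.g. a retired allocation region
      used.decrease_used(1 * 1024 * 1024);  // e.g. a region returned to the free list
      std::printf("used: %zu bytes\n", used.used());
      return 0;
    }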