
src/share/vm/gc/g1/g1CollectedHeap.hpp

rev 11593 : imported patch 8034842-par-free-cset-old
rev 11595 : imported patch 8034842-erikh-jmasa-review

--- old/src/share/vm/gc/g1/g1CollectedHeap.hpp

 101 // (optional) _is_alive_non_header closure in the STW
 102 // reference processor. It is also extensively used during
 103 // reference processing in STW evacuation pauses.
 104 class G1STWIsAliveClosure: public BoolObjectClosure {
 105   G1CollectedHeap* _g1;
 106 public:
 107   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
 108   bool do_object_b(oop p);
 109 };
 110 
 111 class RefineCardTableEntryClosure;
 112 
 113 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 114  private:
 115   void reset_from_card_cache(uint start_idx, size_t num_regions);
 116  public:
 117   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 118 };
 119 
 120 class G1CollectedHeap : public CollectedHeap {
 121   friend class VM_CollectForMetadataAllocation;
 122   friend class VM_G1CollectForAllocation;
 123   friend class VM_G1CollectFull;
 124   friend class VM_G1IncCollectionPause;
 125   friend class VMStructs;
 126   friend class MutatorAllocRegion;
 127   friend class G1GCAllocRegion;
 128   friend class G1HeapVerifier;
 129 
 130   // Closures used in implementation.
 131   friend class G1ParScanThreadState;
 132   friend class G1ParScanThreadStateSet;
 133   friend class G1ParTask;
 134   friend class G1PLABAllocator;
 135   friend class G1PrepareCompactClosure;
 136 
 137   // Other related classes.
 138   friend class HeapRegionClaimer;
 139 
 140   // Testing classes.


 625   // +ExplicitGCInvokesConcurrent).
 626   void increment_old_marking_cycles_completed(bool concurrent);
 627 
 628   uint old_marking_cycles_completed() {
 629     return _old_marking_cycles_completed;
 630   }
 631 
 632   G1HRPrinter* hr_printer() { return &_hr_printer; }
 633 
 634   // Allocates a new heap region instance.
 635   HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
 636 
 637   // Allocate the highest free region in the reserved heap. This will commit
 638   // regions as necessary.
 639   HeapRegion* alloc_highest_free_region();
 640 
 641   // Frees a non-humongous region by initializing its contents and
 642   // adding it to the free list that's passed as a parameter (this is
 643   // usually a local list which will be appended to the master free
 644   // list later).
 645   // If par is true, the region's RSet will not be freed up.
 646   // The assumption is that this will be done later.
 647   // The locked parameter indicates if the caller has already taken
 648   // care of proper synchronization. This may allow some optimizations.
 649   void free_region(HeapRegion* hr,
 650                    FreeRegionList* free_list,
 651                    bool par,
 652                    bool locked = false);
 653 
 654   // It dirties the cards that cover the block so that the post
 655   // write barrier never queues anything when updating objects on this
 656   // block. It is assumed (and in fact we assert) that the block
 657   // belongs to a young region.
 658   inline void dirty_young_block(HeapWord* start, size_t word_size);
 659 
 660   // Frees a humongous region by collapsing it into individual regions
 661   // and calling free_region() for each of them. The freed regions
 662   // will be added to the free list that's passed as a parameter (this
 663   // is usually a local list which will be appended to the master free
 664   // list later).
 665   // If par is true, the region's RSet will not be freed up.
 666   // The assumption is that this will be done later.
 667   void free_humongous_region(HeapRegion* hr,
 668                              FreeRegionList* free_list,
 669                              bool par);
 670 
 671   // Facility for allocating in 'archive' regions in high heap memory and
 672   // recording the allocated ranges. These should all be called from the
 673   // VM thread at safepoints, without the heap lock held. They can be used
 674   // to create and archive a set of heap regions which can be mapped at the
 675   // same fixed addresses in a subsequent JVM invocation.
 676   void begin_archive_alloc_range();
 677 
 678   // Check if the requested size would be too large for an archive allocation.
 679   bool is_archive_alloc_too_large(size_t word_size);
 680 
 681   // Allocate memory of the requested size from the archive region. This will
 682   // return NULL if the size is too large or if no memory is available. It
 683   // does not trigger a garbage collection.
 684   HeapWord* archive_mem_allocate(size_t word_size);
 685 
 686   // Optionally aligns the end address and returns the allocated ranges in
 687   // an array of MemRegions in order of ascending addresses.
 688   void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
 689                                size_t end_alignment_in_bytes = 0);

+++ new/src/share/vm/gc/g1/g1CollectedHeap.hpp

 101 // (optional) _is_alive_non_header closure in the STW
 102 // reference processor. It is also extensively used during
 103 // reference processing in STW evacuation pauses.
 104 class G1STWIsAliveClosure: public BoolObjectClosure {
 105   G1CollectedHeap* _g1;
 106 public:
 107   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
 108   bool do_object_b(oop p);
 109 };
 110 
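For orientation, a minimal sketch of how such an is-alive predicate is
typically implemented, assuming G1CollectedHeap exposes an is_in_cset(oop)
query (an editor's illustration, not part of this patch):

bool G1STWIsAliveClosure::do_object_b(oop p) {
  // During an STW evacuation pause an object is alive if it lies outside
  // the collection set, or lies inside it and has already been copied
  // (i.e. it carries a forwarding pointer).
  return !_g1->is_in_cset(p) || p->is_forwarded();
}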
 111 class RefineCardTableEntryClosure;
 112 
 113 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 114  private:
 115   void reset_from_card_cache(uint start_idx, size_t num_regions);
 116  public:
 117   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 118 };
 119 
 120 class G1CollectedHeap : public CollectedHeap {
 121   friend class G1FreeCollectionSetTask;
 122   friend class VM_CollectForMetadataAllocation;
 123   friend class VM_G1CollectForAllocation;
 124   friend class VM_G1CollectFull;
 125   friend class VM_G1IncCollectionPause;
 126   friend class VMStructs;
 127   friend class MutatorAllocRegion;
 128   friend class G1GCAllocRegion;
 129   friend class G1HeapVerifier;
 130 
 131   // Closures used in implementation.
 132   friend class G1ParScanThreadState;
 133   friend class G1ParScanThreadStateSet;
 134   friend class G1ParTask;
 135   friend class G1PLABAllocator;
 136   friend class G1PrepareCompactClosure;
 137 
 138   // Other related classes.
 139   friend class HeapRegionClaimer;
 140 
 141   // Testing classes.


 626   // +ExplicitGCInvokesConcurrent).
 627   void increment_old_marking_cycles_completed(bool concurrent);
 628 
 629   uint old_marking_cycles_completed() {
 630     return _old_marking_cycles_completed;
 631   }
 632 
 633   G1HRPrinter* hr_printer() { return &_hr_printer; }
 634 
 635   // Allocates a new heap region instance.
 636   HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
 637 
 638   // Allocate the highest free region in the reserved heap. This will commit
 639   // regions as necessary.
 640   HeapRegion* alloc_highest_free_region();
 641 
 642   // Frees a non-humongous region by initializing its contents and
 643   // adding it to the free list that's passed as a parameter (this is
 644   // usually a local list which will be appended to the master free
 645   // list later).
 646   // If skip_remset is true, the region's RSet will not be freed up.
 647   // If skip_hot_card_cache is true, the region's entries in the hot card
 648   // cache will not be reset. The assumption is that this will be done later.
 649   // The locked parameter indicates if the caller has already taken
 650   // care of proper synchronization. This may allow some optimizations.
 651   void free_region(HeapRegion* hr,
 652                    FreeRegionList* free_list,
 653                    bool skip_remset,
 654                    bool skip_hot_card_cache = false,
 655                    bool locked = false);
 656 
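The local-list pattern described above can be sketched as follows, assuming
the class provides prepend_to_freelist(); the wrapper function itself is
hypothetical and only illustrates the intended call sequence:

void G1CollectedHeap::free_regions_example(const GrowableArray<HeapRegion*>& regions) {
  // Accumulate freed regions on a local list so the master free list
  // (and its lock) is touched only once for the whole batch.
  FreeRegionList local_free_list("Local Free List");
  for (int i = 0; i < regions.length(); i++) {
    // skip_remset = true: remembered sets of evacuated regions are
    // reclaimed in bulk later rather than one region at a time.
    free_region(regions.at(i), &local_free_list, true /* skip_remset */);
  }
  // Splice the local list onto the master free list in one operation.
  prepend_to_freelist(&local_free_list);
}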
 657   // It dirties the cards that cover the block so that the post
 658   // write barrier never queues anything when updating objects on this
 659   // block. It is assumed (and in fact we assert) that the block
 660   // belongs to a young region.
 661   inline void dirty_young_block(HeapWord* start, size_t word_size);
 662 
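In essence, dirty_young_block() can be sketched as below; the
g1_mark_as_young() barrier-set call is an assumption about the surrounding
API, and the real version additionally asserts that the block lies in a
single young region:

inline void G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  // Mark every card covering [start, start + word_size) as young so the
  // post write barrier's card check filters out updates to this block
  // without enqueueing anything.
  MemRegion mr(start, start + word_size);
  g1_barrier_set()->g1_mark_as_young(mr);
}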
 663   // Frees a humongous region by collapsing it into individual regions
 664   // and calling free_region() for each of them. The freed regions
 665   // will be added to the free list that's passed as a parameter (this
 666   // is usually a local list which will be appended to the master free
 667   // list later).
 668   // If skip_remset is true, the region's RSet will not be freed up.
 669   // The assumption is that this will be done later.
 670   void free_humongous_region(HeapRegion* hr,
 671                              FreeRegionList* free_list,
 672                              bool skip_remset);
 673 
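A sketch of the collapse described above, assuming HeapRegion provides
is_humongous() and clear_humongous() (an illustration of the idea, not the
patched implementation):

void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
                                            FreeRegionList* free_list,
                                            bool skip_remset) {
  assert(hr->is_humongous(), "this is only for humongous regions");
  // Strip the starts/continues-humongous state so the region can be
  // reused as an ordinary region, then free it like any other.
  hr->clear_humongous();
  free_region(hr, free_list, skip_remset);
}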
 674   // Facility for allocating in 'archive' regions in high heap memory and
 675   // recording the allocated ranges. These should all be called from the
 676   // VM thread at safepoints, without the heap lock held. They can be used
 677   // to create and archive a set of heap regions which can be mapped at the
 678   // same fixed addresses in a subsequent JVM invocation.
 679   void begin_archive_alloc_range();
 680 
 681   // Check if the requested size would be too large for an archive allocation.
 682   bool is_archive_alloc_too_large(size_t word_size);
 683 
 684   // Allocate memory of the requested size from the archive region. This will
 685   // return NULL if the size is too large or if no memory is available. It
 686   // does not trigger a garbage collection.
 687   HeapWord* archive_mem_allocate(size_t word_size);
 688 
 689   // Optionally aligns the end address and returns the allocated ranges in
 690   // an array of MemRegions in order of ascending addresses.
 691   void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
 692                                size_t end_alignment_in_bytes = 0);
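
Taken together, these methods form a small begin/allocate/end protocol. A
usage sketch follows; the driver function is hypothetical, and the real
clients live in the CDS archiving code:

void archive_example(G1CollectedHeap* g1h, const GrowableArray<size_t>& word_sizes) {
  // Must run in the VM thread at a safepoint (see comment above).
  ResourceMark rm; // backs the GrowableArray below
  g1h->begin_archive_alloc_range();
  for (int i = 0; i < word_sizes.length(); i++) {
    size_t word_size = word_sizes.at(i);
    if (g1h->is_archive_alloc_too_large(word_size)) {
      continue; // can never fit in an archive region
    }
    HeapWord* p = g1h->archive_mem_allocate(word_size);
    if (p == NULL) {
      break; // archive space exhausted; no GC is triggered
    }
    // ... copy the object being archived into p ...
  }
  // Retrieve the allocated ranges, in ascending address order, so a later
  // JVM invocation can map them at the same fixed addresses.
  GrowableArray<MemRegion> ranges(4);
  g1h->end_archive_alloc_range(&ranges);
}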

