
src/share/vm/gc/g1/g1CollectedHeap.hpp

@@ -660,66 +660,66 @@
   // block. It is assumed (and in fact we assert) that the block
   // belongs to a young region.
   inline void dirty_young_block(HeapWord* start, size_t word_size);
 
   // Frees a humongous region by collapsing it into individual regions
   // and calling free_region() for each of them. The freed regions
   // will be added to the free list that's passed as a parameter (this
   // is usually a local list which will be appended to the master free
   // list later). The used bytes of freed regions are accumulated in
   // pre_used. If skip_remset is true, the region's RSet will not be freed
   // up. The assumption is that this will be done later.
   void free_humongous_region(HeapRegion* hr,
                              FreeRegionList* free_list,
                              bool skip_remset);
 
   // Facility for allocating in 'archive' regions in high heap memory and
   // recording the allocated ranges. These should all be called from the
   // VM thread at safepoints, without the heap lock held. They can be used
   // to create and archive a set of heap regions which can be mapped at the
   // same fixed addresses in a subsequent JVM invocation.
-  void begin_archive_alloc_range();
+  void begin_archive_alloc_range(bool open = false);
 
   // Check if the requested size would be too large for an archive allocation.
   bool is_archive_alloc_too_large(size_t word_size);
 
   // Allocate memory of the requested size from the archive region. This will
   // return NULL if the size is too large or if no memory is available. It
   // does not trigger a garbage collection.
   HeapWord* archive_mem_allocate(size_t word_size);
 
   // Optionally aligns the end address and returns the allocated ranges in
   // an array of MemRegions in order of ascending addresses.
   void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
                                size_t end_alignment_in_bytes = 0);
 
   // Facility for allocating a fixed range within the heap and marking
   // the containing regions as 'archive'. For use at JVM init time, when the
   // caller may mmap archived heap data at the specified range(s).
   // Verify that the MemRegions specified in the argument array are within the
   // reserved heap.
   bool check_archive_addresses(MemRegion* range, size_t count);
 
   // Commit the appropriate G1 regions containing the specified MemRegions
   // and mark them as 'archive' regions. The regions in the array must be
   // non-overlapping and in order of ascending address.
-  bool alloc_archive_regions(MemRegion* range, size_t count);
+  bool alloc_archive_regions(MemRegion* range, size_t count, bool open);
 
   // Insert any required filler objects in the G1 regions around the specified
   // ranges to make the regions parseable. This must be called after
   // alloc_archive_regions, and after class loading has occurred.
   void fill_archive_regions(MemRegion* range, size_t count);
 
   // For each of the specified MemRegions, uncommit the containing G1 regions
   // which had been allocated by alloc_archive_regions. This should be called
   // rather than fill_archive_regions at JVM init time if the archive file
   // mapping failed, with the same non-overlapping and sorted MemRegion array.
   void dealloc_archive_regions(MemRegion* range, size_t count);
 
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
   // (Rounds down to a HeapRegion boundary.)
   virtual void shrink(size_t expand_bytes);
   void shrink_helper(size_t expand_bytes);
 
   #if TASKQUEUE_STATS
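
For context, the archive-allocation facility in this hunk is used as a bracketed sequence on the VM thread at a safepoint: begin, one or more allocations, end. A minimal sketch of a caller follows; it is not part of this changeset, and the object size, the copying step, and the resource-allocated 'ranges' array are illustrative assumptions. Note that the new 'open' parameter defaults to false, so existing no-argument callers keep their old behavior.

  // Hypothetical caller running on the VM thread at a safepoint.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Start recording archive allocations. Passing false matches the
  // behavior of the previous no-argument begin_archive_alloc_range().
  g1h->begin_archive_alloc_range(false /* open */);

  size_t word_size = 16;  // illustrative object size, in words
  if (!g1h->is_archive_alloc_too_large(word_size)) {
    HeapWord* p = g1h->archive_mem_allocate(word_size);
    if (p != NULL) {
      // ... copy the archived object's contents to 'p' ...
    }
  }

  // Stop allocating and collect the allocated ranges, in ascending
  // address order, for recording in the archive.
  ResourceMark rm;
  GrowableArray<MemRegion>* ranges = new GrowableArray<MemRegion>(2);
  g1h->end_archive_alloc_range(ranges, 0 /* end_alignment_in_bytes */);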

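The fixed-range functions are likewise intended to be called in order at JVM init time, with dealloc_archive_regions as the failure path. A minimal sketch under the same caveat; the two-element array and the map_archive_file() helper are stand-ins for illustration, not real names from this code:

  // Hypothetical init-time caller. The ranges must be non-overlapping
  // and sorted by ascending address, as the comments above require.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  MemRegion ranges[2];  // filled in from the archive's recorded addresses
  size_t count = 2;

  if (!g1h->check_archive_addresses(ranges, count)) {
    return false;  // recorded addresses lie outside the reserved heap
  }
  if (!g1h->alloc_archive_regions(ranges, count, false /* open */)) {
    return false;  // the containing regions could not be committed
  }
  if (!map_archive_file(ranges, count)) {
    // Mapping failed: uncommit what alloc_archive_regions committed.
    g1h->dealloc_archive_regions(ranges, count);
    return false;
  }
  // Later, once class loading has occurred, make the regions parseable:
  g1h->fill_archive_regions(ranges, count);
  return true;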
