
src/share/vm/gc/g1/g1CollectedHeap.hpp

rev 8824 : [mq]: rev1
rev 8825 : [mq]: rev2


 734   void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
 735                                size_t end_alignment_in_bytes = 0);
 736 
 737   // Facility for allocating a fixed range within the heap and marking
 738   // the containing regions as 'archive'. For use at JVM init time, when the
 739   // caller may mmap archived heap data at the specified range(s).
 740   // Verify that the MemRegions specified in the argument array are within the
 741   // reserved heap.
 742   bool check_archive_addresses(MemRegion* range, size_t count);
 743 
 744   // Commit the appropriate G1 regions containing the specified MemRegions
 745   // and mark them as 'archive' regions. The regions in the array must be
 746   // non-overlapping and in order of ascending address.
 747   bool alloc_archive_regions(MemRegion* range, size_t count);
 748 
 749   // Insert any required filler objects in the G1 regions around the specified
 750   // ranges to make the regions parseable. This must be called after
 751   // alloc_archive_regions, and after class loading has occurred.
 752   void fill_archive_regions(MemRegion* range, size_t count);
 753 
 754   // For each of the specified MemRegions, free the containing G1 regions 
 755   // which had been allocated by alloc_archive_regions. This should be called
 756   // rather than fill_archive_regions at JVM init time if the archive file 
 757   // mapping failed, with the same non-overlapping and sorted MemRegion array.
 758   void free_archive_regions(MemRegion* range, size_t count);
 759 
 760 protected:
 761 
 762   // Shrink the garbage-first heap by at most the given size (in bytes!).
 763   // (Rounds down to a HeapRegion boundary.)
 764   virtual void shrink(size_t expand_bytes);
 765   void shrink_helper(size_t expand_bytes);
 766 
 767   #if TASKQUEUE_STATS
 768   static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
 769   void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
 770   void reset_taskqueue_stats();
 771   #endif // TASKQUEUE_STATS
 772 
 773   // Schedule the VM operation that will do an evacuation pause to
 774   // satisfy an allocation request of word_size. *succeeded will
 775   // return whether the VM operation was successful (it did do an
 776   // evacuation pause) or not (another thread beat us to it or the GC
 777   // locker was active). Given that we should not be holding the
 778   // Heap_lock when we enter this method, we will pass the




 734   void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
 735                                size_t end_alignment_in_bytes = 0);
 736 
 737   // Facility for allocating a fixed range within the heap and marking
 738   // the containing regions as 'archive'. For use at JVM init time, when the
 739   // caller may mmap archived heap data at the specified range(s).
 740   // Verify that the MemRegions specified in the argument array are within the
 741   // reserved heap.
 742   bool check_archive_addresses(MemRegion* range, size_t count);
 743 
 744   // Commit the appropriate G1 regions containing the specified MemRegions
 745   // and mark them as 'archive' regions. The regions in the array must be
 746   // non-overlapping and in order of ascending address.
 747   bool alloc_archive_regions(MemRegion* range, size_t count);
 748 
 749   // Insert any required filler objects in the G1 regions around the specified
 750   // ranges to make the regions parseable. This must be called after
 751   // alloc_archive_regions, and after class loading has occurred.
 752   void fill_archive_regions(MemRegion* range, size_t count);
 753 
 754   // For each of the specified MemRegions, uncommit the containing G1 regions 
 755   // which had been allocated by alloc_archive_regions. This should be called
 756   // rather than fill_archive_regions at JVM init time if the archive file 
 757   // mapping failed, with the same non-overlapping and sorted MemRegion array.
 758   void dealloc_archive_regions(MemRegion* range, size_t count);
 759 
 760 protected:
 761 
 762   // Shrink the garbage-first heap by at most the given size (in bytes!).
 763   // (Rounds down to a HeapRegion boundary.)
 764   virtual void shrink(size_t expand_bytes);
 765   void shrink_helper(size_t expand_bytes);
 766 
 767   #if TASKQUEUE_STATS
 768   static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
 769   void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
 770   void reset_taskqueue_stats();
 771   #endif // TASKQUEUE_STATS
 772 
 773   // Schedule the VM operation that will do an evacuation pause to
 774   // satisfy an allocation request of word_size. *succeeded will
 775   // return whether the VM operation was successful (it did do an
 776   // evacuation pause) or not (another thread beat us to it or the GC
 777   // locker was active). Given that we should not be holding the
 778   // Heap_lock when we enter this method, we will pass the
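The declarations above define a small protocol for mapping archived heap data at JVM init time: validate the ranges, commit and mark the containing regions as 'archive', then either make the surrounding regions parseable or hand the regions back if the mapping fails. The following caller-side sketch illustrates that sequence; it is not code from this webrev. map_archived_heap_data() is an assumed placeholder for the archive-file mapping step, try_map_archive_ranges() is a hypothetical helper, and in the real flow fill_archive_regions() runs later, after class loading, per the comment on that method.

#include "gc/g1/g1CollectedHeap.hpp"
#include "memory/memRegion.hpp"

// Hypothetical placeholder for the code that mmaps the archive file into
// the committed ranges; not part of the G1 API shown above.
static bool map_archived_heap_data(MemRegion* ranges, size_t count);

// Hypothetical helper showing the intended call order of the archive API.
static bool try_map_archive_ranges(MemRegion* ranges, size_t count) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Verify that every requested range lies within the reserved heap.
  if (!g1h->check_archive_addresses(ranges, count)) {
    return false;
  }

  // Commit the containing regions and mark them as 'archive'. The array
  // must be non-overlapping and sorted by ascending address.
  if (!g1h->alloc_archive_regions(ranges, count)) {
    return false;
  }

  if (!map_archived_heap_data(ranges, count)) {
    // Mapping failed: give the committed regions back, passing the same
    // MemRegion array that was given to alloc_archive_regions.
    g1h->dealloc_archive_regions(ranges, count);
    return false;
  }

  // Mapping succeeded: insert any required filler objects so the regions
  // are parseable. (In the real flow this happens after class loading.)
  g1h->fill_archive_regions(ranges, count);
  return true;
}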

