src/share/vm/gc/g1/g1CollectedHeap.hpp

rev 8461 : imported patch webrev1.patch
rev 8462 : [mq]: version3


 558   inline HeapWord* survivor_attempt_allocation(size_t word_size,
 559                                                AllocationContext_t context);
 560 
 561   // Allocation attempt during GC for an old object / PLAB.
 562   inline HeapWord* old_attempt_allocation(size_t word_size,
 563                                           AllocationContext_t context);
 564 
 565   // These methods are the "callbacks" from the G1AllocRegion class.
 566 
 567   // For mutator alloc regions.
 568   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
 569   void retire_mutator_alloc_region(HeapRegion* alloc_region,
 570                                    size_t allocated_bytes);
 571 
 572   // For GC alloc regions.
 573   HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
 574                                   InCSetState dest);
 575   void retire_gc_alloc_region(HeapRegion* alloc_region,
 576                               size_t allocated_bytes, InCSetState dest);
 577 

 578   // - if explicit_gc is true, the GC is for a System.gc() or a heap
 579   //   inspection request and should collect the entire heap
 580   // - if clear_all_soft_refs is true, all soft references should be
 581   //   cleared during the GC
 582   // - if explicit_gc is false, word_size describes the allocation that
 583   //   the GC should attempt (at least) to satisfy
 584   // - it returns false if it is unable to do the collection due to the
 585   //   GC locker being active, true otherwise
 586   bool do_collection(bool explicit_gc,
 587                      bool clear_all_soft_refs,
 588                      size_t word_size);
 589 
 590   // Callback from VM_G1CollectFull operation.
 591   // Perform a full collection.
 592   virtual void do_full_collection(bool clear_all_soft_refs);
 593 
 594   // Resize the heap if necessary after a full collection.  If this is
 595   // after a collect-for-allocation, "word_size" is the allocation size,
 596   // and will be considered part of the used portion of the heap.
 597   void resize_if_necessary_after_full_collection(size_t word_size);


 716   // pre_used. If par is true, the region's RSet will not be freed
 717   // up. The assumption is that this will be done later.
 718   // The locked parameter indicates if the caller has already taken
 719   // care of proper synchronization. This may allow some optimizations.
 720   void free_region(HeapRegion* hr,
 721                    FreeRegionList* free_list,
 722                    bool par,
 723                    bool locked = false);
 724 
 725   // Frees a humongous region by collapsing it into individual regions
 726   // and calling free_region() for each of them. The freed regions
 727   // will be added to the free list that's passed as a parameter (this
 728   // is usually a local list which will be appended to the master free
 729   // list later). The used bytes of freed regions are accumulated in
 730   // pre_used. If par is true, the region's RSet will not be freed
 731   // up. The assumption is that this will be done later.
 732   void free_humongous_region(HeapRegion* hr,
 733                              FreeRegionList* free_list,
 734                              bool par);
 735 
 736   // Facility for allocating in 'archive' regions in high heap memory from 
 737   // the VM thread, and recording the allocated ranges.  The end_ call 
 738   // optionally aligns the end address and returns the allocated ranges as
 739   // an ascending array of MemRegions.  This can be used to create and 
 740   // archive a heap region which can be mapped at the same fixed addresses
 741   // in a future JVM instance.
 742   void begin_archive_alloc_range();
 743   void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
 744                                uint end_alignment = 0);
 745   bool is_archive_alloc_too_large(size_t word_size);

 746   HeapWord* archive_mem_allocate(size_t word_size);
 747   HeapRegion* alloc_highest_available_region();
 748 
 749   // Facility for allocating a fixed range within the heap and marking
 750   // the containing regions as 'archive'.  For use at JVM init time, when the caller
 751   // may mmap archived heap data at the specified range(s). The check_ call
 752   // verifies that the regions are within the reserved heap.  The alloc_ call
 753   commits the appropriate regions and marks them as 'archive', after which
 754   // the caller can perform the mmap.  The fill_ call (which must occur after class 
 755   // loading) inserts any required filler objects around the specified ranges
 756   // to make the regions parseable.
 757   bool check_archive_addresses(MemRegion* range, uint count);
 758   bool alloc_archive_regions(MemRegion* range, uint count);
 759   void fill_archive_regions(MemRegion* range, uint count);
 760 
 761   // Fill the requested space without creating any humongous objects.
 762   static void fill_with_non_humongous_objects(HeapWord* base_address, size_t word_size);
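
  // A usage sketch for the call above (the range values are illustrative):
  //
  //   HeapWord* base  = ...;  // start of the dead range to fill
  //   size_t    words = ...;  // size of the range in words
  //   // The range is carved into filler objects that each stay below the
  //   // humongous threshold, instead of one large humongous filler.
  //   G1CollectedHeap::fill_with_non_humongous_objects(base, words);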
 763 
 764 protected:
 765 
 766   // Shrink the garbage-first heap by at most the given size (in bytes!).
 767   // (Rounds down to a HeapRegion boundary.)
 768   virtual void shrink(size_t expand_bytes);
 769   void shrink_helper(size_t expand_bytes);
 770 
 771   #if TASKQUEUE_STATS
 772   static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
 773   void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
 774   void reset_taskqueue_stats();
 775   #endif // TASKQUEUE_STATS
 776 
 777   // Schedule the VM operation that will do an evacuation pause to
 778   // satisfy an allocation request of word_size. *succeeded will
 779   // return whether the VM operation was successful (it did do an
 780   // evacuation pause) or not (another thread beat us to it or the GC
 781   // locker was active). Given that we should not be holding the
 782   // Heap_lock when we enter this method, we will pass the

 558   inline HeapWord* survivor_attempt_allocation(size_t word_size,
 559                                                AllocationContext_t context);
 560 
 561   // Allocation attempt during GC for an old object / PLAB.
 562   inline HeapWord* old_attempt_allocation(size_t word_size,
 563                                           AllocationContext_t context);
 564 
 565   // These methods are the "callbacks" from the G1AllocRegion class.
 566 
 567   // For mutator alloc regions.
 568   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
 569   void retire_mutator_alloc_region(HeapRegion* alloc_region,
 570                                    size_t allocated_bytes);
 571 
 572   // For GC alloc regions.
 573   HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
 574                                   InCSetState dest);
 575   void retire_gc_alloc_region(HeapRegion* alloc_region,
 576                               size_t allocated_bytes, InCSetState dest);
 577 
 578   // Allocate the highest free region in the reserved heap. This will commit
 579   // regions as necessary.
 580   HeapRegion* alloc_highest_free_region();
 581 
 582   // - if explicit_gc is true, the GC is for a System.gc() or a heap
 583   //   inspection request and should collect the entire heap
 584   // - if clear_all_soft_refs is true, all soft references should be
 585   //   cleared during the GC
 586   // - if explicit_gc is false, word_size describes the allocation that
 587   //   the GC should attempt (at least) to satisfy
 588   // - it returns false if it is unable to do the collection due to the
 589   //   GC locker being active, true otherwise
 590   bool do_collection(bool explicit_gc,
 591                      bool clear_all_soft_refs,
 592                      size_t word_size);
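
  // An illustrative call matching the comment above: for an explicit
  // System.gc() request the whole heap is collected, so word_size is unused.
  //
  //   bool succeeded = do_collection(true  /* explicit_gc */,
  //                                  false /* clear_all_soft_refs */,
  //                                  0     /* word_size */);
  //   // A false result means the GC locker was active.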
 593 
 594   // Callback from VM_G1CollectFull operation.
 595   // Perform a full collection.
 596   virtual void do_full_collection(bool clear_all_soft_refs);
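
  // Sketch of the calling pattern implied above; the VM_G1CollectFull
  // constructor arguments are assumptions, not taken from this file:
  //
  //   VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
  //   VMThread::execute(&op);   // eventually calls do_full_collection()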
 597 
 598   // Resize the heap if necessary after a full collection.  If this is
 599   // after a collect-for-allocation, "word_size" is the allocation size,
 600   // and will be considered part of the used portion of the heap.
 601   void resize_if_necessary_after_full_collection(size_t word_size);


 720   // pre_used. If par is true, the region's RSet will not be freed
 721   // up. The assumption is that this will be done later.
 722   // The locked parameter indicates if the caller has already taken
 723   // care of proper synchronization. This may allow some optimizations.
 724   void free_region(HeapRegion* hr,
 725                    FreeRegionList* free_list,
 726                    bool par,
 727                    bool locked = false);
 728 
 729   // Frees a humongous region by collapsing it into individual regions
 730   // and calling free_region() for each of them. The freed regions
 731   // will be added to the free list that's passed as a parameter (this
 732   // is usually a local list which will be appended to the master free
 733   // list later). The used bytes of freed regions are accumulated in
 734   // pre_used. If par is true, the region's RSet will not be freed
 735   // up. The assumption is that this will be done later.
 736   void free_humongous_region(HeapRegion* hr,
 737                              FreeRegionList* free_list,
 738                              bool par);
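
  // Sketch of the pattern described above (the list name and the final
  // append step are assumptions): free into a local list, then move the
  // regions to the master free list under the appropriate lock.
  //
  //   FreeRegionList local_list("Local List For Humongous Free");
  //   free_humongous_region(hr, &local_list, true /* par */);
  //   // ... later, while holding the free-list lock:
  //   //   prepend_to_freelist(&local_list);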
 739 
 740   // Facility for allocating in 'archive' regions in high heap memory and
 741   // recording the allocated ranges. These should all be called from the 
 742   // VM thread at safepoints, without the heap lock held. They can be used
 743   // to create and archive a set of heap regions which can be mapped at the
 744   // same fixed addresses in a subsequent JVM invocation.
 745   void begin_archive_alloc_range();
 746 
 747   // Check if the requested size would be too large for an archive allocation.
 748   bool is_archive_alloc_too_large(size_t word_size);
 749 
 750   // Allocate memory of the requested size from the archive region. This will
 751   // return NULL if the size is too large or if no memory is available. It
 752   // does not trigger a garbage collection.
 753   HeapWord* archive_mem_allocate(size_t word_size);
 754 
 755   // Optionally aligns the end address and returns the allocated ranges in
 756   // an array of MemRegions in order of ascending addresses.
 757   void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
 758                                size_t end_alignment_in_bytes = 0);
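
  // A usage sketch of the four calls above, assuming a VM-thread caller at
  // a safepoint; the alignment choice and the copy step are illustrative:
  //
  //   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  //   g1h->begin_archive_alloc_range();
  //   if (!g1h->is_archive_alloc_too_large(word_size)) {
  //     HeapWord* p = g1h->archive_mem_allocate(word_size);
  //     if (p != NULL) {
  //       // ... copy one object into the space at p ...
  //     }
  //   }
  //   GrowableArray<MemRegion> ranges(2);
  //   g1h->end_archive_alloc_range(&ranges, HeapRegion::GrainBytes);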
 759 
 760   // Facility for allocating a fixed range within the heap and marking
 761   // the containing regions as 'archive'. For use at JVM init time, when the 
 762   // caller may mmap archived heap data at the specified range(s).
 763   // Verify that the MemRegions specified in the argument array are within the 
 764   // reserved heap. 
 765   bool check_archive_addresses(MemRegion* range, size_t count);
 766 
 767   // Commit the appropriate G1 regions containing the specified MemRegions
 768   // and mark them as 'archive' regions. The regions in the array must be
 769   // non-overlapping and in order of ascending address.
 770   bool alloc_archive_regions(MemRegion* range, size_t count);
 771 
 772   // Insert any required filler objects in the G1 regions around the specified 
 773   // ranges to make the regions parseable. This must be called after
 774   // alloc_archive_regions, and after class loading has occurred.
 775   void fill_archive_regions(MemRegion* range, size_t count);
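
  // A usage sketch of the restore path described above; 'ranges' and
  // 'count' stand for the caller's array of archived MemRegions:
  //
  //   if (g1h->check_archive_addresses(ranges, count) &&
  //       g1h->alloc_archive_regions(ranges, count)) {
  //     // ... mmap the archived heap data over the committed regions ...
  //     // then, once class loading has occurred:
  //     g1h->fill_archive_regions(ranges, count);
  //   }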
 776 
 777 protected:
 778 
 779   // Shrink the garbage-first heap by at most the given size (in bytes!).
 780   // (Rounds down to a HeapRegion boundary.)
 781   virtual void shrink(size_t expand_bytes);
 782   void shrink_helper(size_t expand_bytes);
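
  // For example, with a 1M HeapRegion size, shrink(2560 * K) rounds the
  // request down to a region boundary and shrinks the heap by at most 2M.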
 783 
 784   #if TASKQUEUE_STATS
 785   static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
 786   void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
 787   void reset_taskqueue_stats();
 788   #endif // TASKQUEUE_STATS
 789 
 790   // Schedule the VM operation that will do an evacuation pause to
 791   // satisfy an allocation request of word_size. *succeeded will
 792   // return whether the VM operation was successful (it did do an
 793   // evacuation pause) or not (another thread beat us to it or the GC
 794   // locker was active). Given that we should not be holding the
 795   // Heap_lock when we enter this method, we will pass the

