
src/share/vm/gc/g1/g1CollectedHeap.hpp

rev 8813 : imported patch 8003237-no-wait-for-free-list
rev 8814 : imported patch jon-fast-evac-failure
rev 8816 : [mq]: 8133470-fix-plab-inline
rev 8817 : imported patch 8073013-add-detailed-information-about-plab-memory-usage
rev 8818 : imported patch jon-review-statistics
rev 8821 : imported patch move-jfr-event-to-extra-cr
rev 8822 : imported patch 8133530-add-jfr-event-for-evacuation
rev 8823 : imported patch 8040162-avoid-reallocating-plab-allocators
rev 8825 : imported patch mikael-suggestions-pss-alloc


  38 #include "gc/g1/g1YCTypes.hpp"
  39 #include "gc/g1/hSpaceCounters.hpp"
  40 #include "gc/g1/heapRegionManager.hpp"
  41 #include "gc/g1/heapRegionSet.hpp"
  42 #include "gc/shared/barrierSet.hpp"
  43 #include "gc/shared/collectedHeap.hpp"
  44 #include "gc/shared/plab.hpp"
  45 #include "memory/memRegion.hpp"
  46 #include "utilities/stack.hpp"
  47 
  48 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  49 // It uses the "Garbage First" heap organization and algorithm, which
  50 // may combine concurrent marking with parallel, incremental compaction of
  51 // heap subsets that will yield large amounts of garbage.
  52 
  53 // Forward declarations
  54 class HeapRegion;
  55 class HRRSCleanupTask;
  56 class GenerationSpec;
  57 class OopsInHeapRegionClosure;

  58 class G1KlassScanClosure;
  59 class G1ParScanThreadState;
  60 class ObjectClosure;
  61 class SpaceClosure;
  62 class CompactibleSpaceClosure;
  63 class Space;
  64 class G1CollectorPolicy;
  65 class GenRemSet;
  66 class G1RemSet;
  67 class HeapRegionRemSetIterator;
  68 class ConcurrentMark;
  69 class ConcurrentMarkThread;
  70 class ConcurrentG1Refine;
  71 class ConcurrentGCTimer;
  72 class GenerationCounters;
  73 class STWGCTimer;
  74 class G1NewTracer;
  75 class G1OldTracer;
  76 class EvacuationFailedInfo;
  77 class nmethod;


 566   // Resize the heap if necessary after a full collection.  If this is
 567   // after a collect-for allocation, "word_size" is the allocation size,
 568   // and will be considered part of the used portion of the heap.
 569   void resize_if_necessary_after_full_collection(size_t word_size);
 570 
 571   // Callback from VM_G1CollectForAllocation operation.
 572   // This function does everything necessary/possible to satisfy a
 573   // failed allocation request (including collection, expansion, etc.)
 574   HeapWord* satisfy_failed_allocation(size_t word_size,
 575                                       AllocationContext_t context,
 576                                       bool* succeeded);
 577 
  578   // Attempt to expand the heap sufficiently
 579   // to support an allocation of the given "word_size".  If
 580   // successful, perform the allocation and return the address of the
 581   // allocated block, or else "NULL".
 582   HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
 583 
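The block above describes G1's last-ditch allocation path: satisfy_failed_allocation() may collect and then expand the heap before giving up, and expand_and_allocate() retries the allocation after growing the heap. Below is a minimal standalone sketch of that collect-retry-expand-retry shape; ToyHeap and every other name in it are hypothetical stand-ins, not the HotSpot implementation.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Toy stand-ins for the real heap operations; every name here is illustrative.
struct ToyHeap {
  size_t capacity_words = 4;   // pretend-small heap
  size_t used_words     = 4;   // currently full, so the first attempt fails

  void* try_allocate(size_t word_size) {
    if (used_words + word_size > capacity_words) return nullptr;
    used_words += word_size;
    return std::malloc(word_size * sizeof(void*));
  }
  bool collect()            { used_words /= 2;          return true; }  // "reclaim" half
  bool expand(size_t words) { capacity_words += words;  return true; }
};

// Sketch of the collect-retry-expand-retry shape described above.
void* satisfy_failed_allocation_sketch(ToyHeap& heap, size_t word_size, bool* succeeded) {
  *succeeded = true;
  if (heap.collect()) {
    if (void* p = heap.try_allocate(word_size)) return p;   // retry after collection
  }
  if (heap.expand(word_size)) {
    if (void* p = heap.try_allocate(word_size)) return p;   // retry after expansion
  }
  *succeeded = false;                                        // caller must cope with failure
  return nullptr;
}

int main() {
  ToyHeap heap;
  bool ok = false;
  void* p = satisfy_failed_allocation_sketch(heap, 3, &ok);
  std::printf("allocation %s\n", (ok && p != nullptr) ? "succeeded" : "failed");
  std::free(p);
  return 0;
}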
 584   // Process any reference objects discovered during
 585   // an incremental evacuation pause.
 586   void process_discovered_references();
 587 
 588   // Enqueue any remaining discovered references
 589   // after processing.
 590   void enqueue_discovered_references();
 591 
 592 public:
 593   FlexibleWorkGang* workers() const { return _workers; }
 594 
 595   G1Allocator* allocator() {
 596     return _allocator;
 597   }
 598 
 599   G1MonitoringSupport* g1mm() {
 600     assert(_g1mm != NULL, "should have been initialized");
 601     return _g1mm;
 602   }
 603 
 604   // Expand the garbage-first heap by at least the given size (in bytes!).
 605   // Returns true if the heap was expanded by the requested amount;
 606   // false otherwise.
 607   // (Rounds up to a HeapRegion boundary.)
 608   bool expand(size_t expand_bytes);
 609 
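expand() takes a byte count and rounds it up to a whole number of heap regions. A small sketch of that rounding, assuming a hypothetical 1 MB region size (G1's actual region size is chosen at startup, e.g. via -XX:G1HeapRegionSize):

#include <cstddef>
#include <cstdio>

// Hypothetical region size for illustration only.
const size_t kRegionBytes = 1 * 1024 * 1024;

// Round a byte request up to a whole number of regions, as the comment
// for expand() describes.
size_t round_up_to_regions(size_t expand_bytes) {
  size_t regions = (expand_bytes + kRegionBytes - 1) / kRegionBytes;
  return regions * kRegionBytes;
}

int main() {
  // Asking for 2.5 MB of expansion commits 3 whole 1 MB regions.
  std::printf("%zu\n", round_up_to_regions(2 * 1024 * 1024 + 512 * 1024));
  return 0;
}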
 610   // Returns the PLAB statistics for a given destination.


 665   // in this nesting (i.e., the concurrent cycle). Further nesting is
 666   // not currently supported. The end of this call also notifies
 667   // the FullGCCount_lock in case a Java thread is waiting for a full
 668   // GC to happen (e.g., it called System.gc() with
 669   // +ExplicitGCInvokesConcurrent).
 670   void increment_old_marking_cycles_completed(bool concurrent);
 671 
 672   uint old_marking_cycles_completed() {
 673     return _old_marking_cycles_completed;
 674   }
 675 
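The comment above describes notifying FullGCCount_lock so that a Java thread waiting for a full GC (for example after System.gc() with -XX:+ExplicitGCInvokesConcurrent) can observe the completed-cycle counter advance. Here is a minimal model of that wait/notify protocol using standard C++ primitives rather than HotSpot's Monitor; all names are illustrative.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

// Toy counterparts of FullGCCount_lock and the completed-cycle counter.
std::mutex              full_gc_count_lock;
std::condition_variable full_gc_count_cv;
unsigned                old_marking_cycles_completed = 0;

void increment_old_marking_cycles_completed_sketch() {
  std::lock_guard<std::mutex> guard(full_gc_count_lock);
  old_marking_cycles_completed++;
  full_gc_count_cv.notify_all();          // wake any thread waiting for a full GC
}

void wait_for_cycle_after(unsigned count_before) {
  std::unique_lock<std::mutex> lock(full_gc_count_lock);
  full_gc_count_cv.wait(lock, [&] { return old_marking_cycles_completed > count_before; });
}

int main() {
  unsigned before = old_marking_cycles_completed;
  std::thread gc(increment_old_marking_cycles_completed_sketch);
  wait_for_cycle_after(before);            // returns once the cycle is recorded
  gc.join();
  std::printf("observed %u completed cycle(s)\n", old_marking_cycles_completed);
  return 0;
}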
 676   void register_concurrent_cycle_start(const Ticks& start_time);
 677   void register_concurrent_cycle_end();
 678   void trace_heap_after_concurrent_cycle();
 679 
 680   G1HRPrinter* hr_printer() { return &_hr_printer; }
 681 
 682   // Allocates a new heap region instance.
 683   HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
 684 



 685   // Allocate the highest free region in the reserved heap. This will commit
 686   // regions as necessary.
 687   HeapRegion* alloc_highest_free_region();
 688 
 689   // Frees a non-humongous region by initializing its contents and
 690   // adding it to the free list that's passed as a parameter (this is
 691   // usually a local list which will be appended to the master free
  692   // list later).
  693   // If par is true, the region's RSet will not be freed up. The
  694   // assumption is that this will be done later.
 695   // The locked parameter indicates if the caller has already taken
 696   // care of proper synchronization. This may allow some optimizations.
 697   void free_region(HeapRegion* hr,
 698                    FreeRegionList* free_list,
 699                    bool par,
 700                    bool locked = false);
 701 
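free_region() pushes a freed region onto a caller-supplied list, usually a thread-local one that is appended to the master free list later so the global lock is taken only once. A toy version of that local-list-then-splice pattern follows; the types and names are illustrative, not G1's FreeRegionList.

#include <cstdio>
#include <list>
#include <mutex>

// Toy region and free-list types; illustrative only.
struct ToyRegion { unsigned index; };

std::list<ToyRegion*> master_free_list;
std::mutex            master_free_list_lock;

// "Free" a region by putting it on the caller's local list; no global
// synchronization is needed here.
void free_region_sketch(ToyRegion* hr, std::list<ToyRegion*>* local_free_list) {
  local_free_list->push_back(hr);
}

// Append the whole local list to the master list in one short critical section.
void append_to_master(std::list<ToyRegion*>* local_free_list) {
  std::lock_guard<std::mutex> guard(master_free_list_lock);
  master_free_list.splice(master_free_list.end(), *local_free_list);
}

int main() {
  ToyRegion r0{0}, r1{1};
  std::list<ToyRegion*> local;
  free_region_sketch(&r0, &local);
  free_region_sketch(&r1, &local);
  append_to_master(&local);
  std::printf("master free list holds %zu regions\n", master_free_list.size());
  return 0;
}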
 702   // It dirties the cards that cover the block so that the post
 703   // write barrier never queues anything when updating objects on this
 704   // block. It is assumed (and in fact we assert) that the block


 774   // Heap_lock when we enter this method, we will pass the
 775   // gc_count_before (i.e., total_collections()) as a parameter since
 776   // it has to be read while holding the Heap_lock. Currently, both
 777   // methods that call do_collection_pause() release the Heap_lock
 778   // before the call, so it's easy to read gc_count_before just before.
 779   HeapWord* do_collection_pause(size_t         word_size,
 780                                 uint           gc_count_before,
 781                                 bool*          succeeded,
 782                                 GCCause::Cause gc_cause);
 783 
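The comment explains that gc_count_before must be read while the Heap_lock is held and is then passed to do_collection_pause() so the pause can detect whether another collection already ran in the meantime. A small model of that stale-count check, with a std::mutex standing in for the Heap_lock and all names illustrative:

#include <cstdio>
#include <mutex>

// Toy stand-ins for the Heap_lock / total_collections() pattern described above.
std::mutex heap_lock;
unsigned   total_collections = 0;

// Read the collection count while holding the lock, as the comment requires.
unsigned read_gc_count_before() {
  std::lock_guard<std::mutex> guard(heap_lock);
  return total_collections;
}

// The pause proceeds only if no other collection ran since the count was read;
// otherwise the caller should re-evaluate its allocation request.
bool do_collection_pause_sketch(unsigned gc_count_before) {
  std::lock_guard<std::mutex> guard(heap_lock);
  if (total_collections != gc_count_before) return false;  // someone else collected first
  total_collections++;                                      // "perform" the pause
  return true;
}

int main() {
  unsigned before = read_gc_count_before();
  std::printf("pause %s\n", do_collection_pause_sketch(before) ? "ran" : "was skipped");
  return 0;
}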
 784   void wait_for_root_region_scanning();
 785 
 786   // The guts of the incremental collection pause, executed by the vm
 787   // thread. It returns false if it is unable to do the collection due
 788   // to the GC locker being active, true otherwise
 789   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 790 
 791   // Actually do the work of evacuating the collection set.
 792   void evacuate_collection_set(EvacuationInfo& evacuation_info);
 793 











 794   // Update object copying statistics.
 795   void record_obj_copy_mem_stats();
 796   
 797   // The g1 remembered set of the heap.
 798   G1RemSet* _g1_rem_set;
 799 
 800   // A set of cards that cover the objects for which the Rsets should be updated
 801   // concurrently after the collection.
 802   DirtyCardQueueSet _dirty_card_queue_set;
 803 
 804   // The closure used to refine a single card.
 805   RefineCardTableEntryClosure* _refine_cte_cl;
 806 
 807   // A DirtyCardQueueSet that is used to hold cards that contain
 808   // references into the current collection set. This is used to
 809   // update the remembered sets of the regions in the collection
 810   // set in the event of an evacuation failure.
 811   DirtyCardQueueSet _into_cset_dirty_card_queue_set;
 812 
 813   // After a collection pause, make the regions in the CS into free




  38 #include "gc/g1/g1YCTypes.hpp"
  39 #include "gc/g1/hSpaceCounters.hpp"
  40 #include "gc/g1/heapRegionManager.hpp"
  41 #include "gc/g1/heapRegionSet.hpp"
  42 #include "gc/shared/barrierSet.hpp"
  43 #include "gc/shared/collectedHeap.hpp"
  44 #include "gc/shared/plab.hpp"
  45 #include "memory/memRegion.hpp"
  46 #include "utilities/stack.hpp"
  47 
  48 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  49 // It uses the "Garbage First" heap organization and algorithm, which
  50 // may combine concurrent marking with parallel, incremental compaction of
  51 // heap subsets that will yield large amounts of garbage.
  52 
  53 // Forward declarations
  54 class HeapRegion;
  55 class HRRSCleanupTask;
  56 class GenerationSpec;
  57 class OopsInHeapRegionClosure;
  58 class G1ParScanThreadState;
  59 class G1KlassScanClosure;
  60 class G1ParScanThreadState;
  61 class ObjectClosure;
  62 class SpaceClosure;
  63 class CompactibleSpaceClosure;
  64 class Space;
  65 class G1CollectorPolicy;
  66 class GenRemSet;
  67 class G1RemSet;
  68 class HeapRegionRemSetIterator;
  69 class ConcurrentMark;
  70 class ConcurrentMarkThread;
  71 class ConcurrentG1Refine;
  72 class ConcurrentGCTimer;
  73 class GenerationCounters;
  74 class STWGCTimer;
  75 class G1NewTracer;
  76 class G1OldTracer;
  77 class EvacuationFailedInfo;
  78 class nmethod;


 567   // Resize the heap if necessary after a full collection.  If this is
 568   // after a collect-for allocation, "word_size" is the allocation size,
 569   // and will be considered part of the used portion of the heap.
 570   void resize_if_necessary_after_full_collection(size_t word_size);
 571 
 572   // Callback from VM_G1CollectForAllocation operation.
 573   // This function does everything necessary/possible to satisfy a
 574   // failed allocation request (including collection, expansion, etc.)
 575   HeapWord* satisfy_failed_allocation(size_t word_size,
 576                                       AllocationContext_t context,
 577                                       bool* succeeded);
 578 
  579   // Attempt to expand the heap sufficiently
 580   // to support an allocation of the given "word_size".  If
 581   // successful, perform the allocation and return the address of the
 582   // allocated block, or else "NULL".
 583   HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
 584 
 585   // Process any reference objects discovered during
 586   // an incremental evacuation pause.
 587   void process_discovered_references(G1ParScanThreadState** per_thread_states);
 588 
 589   // Enqueue any remaining discovered references
 590   // after processing.
 591   void enqueue_discovered_references(G1ParScanThreadState** per_thread_states);
 592 
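The new signatures take a G1ParScanThreadState** so that reference processing and enqueueing reuse the per-worker states created for evacuation instead of allocating fresh ones. The following toy model shows an array of per-worker states being handed through such a two-phase pass; ToyThreadState and the *_sketch functions are hypothetical, not the HotSpot code.

#include <cstdio>

// Toy per-worker state; the real G1ParScanThreadState carries PLABs,
// work queues and per-thread statistics.
struct ToyThreadState {
  unsigned worker_id;
  size_t   refs_processed;
};

// Model of a processing phase that receives the already-allocated per-worker
// states rather than creating new ones (illustrative only).
void process_discovered_references_sketch(ToyThreadState** states, unsigned n_workers) {
  for (unsigned i = 0; i < n_workers; i++) {
    states[i]->refs_processed += 1;     // pretend each worker handles one reference
  }
}

// Second phase operating on the same per-worker states.
void enqueue_discovered_references_sketch(ToyThreadState** states, unsigned n_workers) {
  for (unsigned i = 0; i < n_workers; i++) {
    std::printf("worker %u processed %zu refs\n",
                states[i]->worker_id, states[i]->refs_processed);
  }
}

int main() {
  ToyThreadState s0{0, 0}, s1{1, 0};
  ToyThreadState* states[] = { &s0, &s1 };
  process_discovered_references_sketch(states, 2);
  enqueue_discovered_references_sketch(states, 2);
  return 0;
}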
 593 public:
 594   FlexibleWorkGang* workers() const { return _workers; }
 595 
 596   G1Allocator* allocator() {
 597     return _allocator;
 598   }
 599 
 600   G1MonitoringSupport* g1mm() {
 601     assert(_g1mm != NULL, "should have been initialized");
 602     return _g1mm;
 603   }
 604 
 605   // Expand the garbage-first heap by at least the given size (in bytes!).
 606   // Returns true if the heap was expanded by the requested amount;
 607   // false otherwise.
 608   // (Rounds up to a HeapRegion boundary.)
 609   bool expand(size_t expand_bytes);
 610 
 611   // Returns the PLAB statistics for a given destination.


 666   // in this nesting (i.e., the concurrent cycle). Further nesting is
 667   // not currently supported. The end of this call also notifies
 668   // the FullGCCount_lock in case a Java thread is waiting for a full
 669   // GC to happen (e.g., it called System.gc() with
 670   // +ExplicitGCInvokesConcurrent).
 671   void increment_old_marking_cycles_completed(bool concurrent);
 672 
 673   uint old_marking_cycles_completed() {
 674     return _old_marking_cycles_completed;
 675   }
 676 
 677   void register_concurrent_cycle_start(const Ticks& start_time);
 678   void register_concurrent_cycle_end();
 679   void trace_heap_after_concurrent_cycle();
 680 
 681   G1HRPrinter* hr_printer() { return &_hr_printer; }
 682 
 683   // Allocates a new heap region instance.
 684   HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
 685 
 686   // Allocates a new per thread par scan state for the given thread id.
 687   G1ParScanThreadState* new_par_scan_state(uint worker_id);
 688 
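new_par_scan_state() is a factory that produces one scan state for a given worker id, so the complete set can be built once per pause. A toy factory with the same shape, using illustrative types rather than G1ParScanThreadState:

#include <cstdio>
#include <vector>

// Toy per-worker state; illustrative only.
struct ToyParScanState {
  unsigned worker_id;
  explicit ToyParScanState(unsigned id) : worker_id(id) {}
};

// Build one state per worker up front, mirroring the "allocate a new per
// thread par scan state for the given thread id" factory above.
std::vector<ToyParScanState*> new_par_scan_states(unsigned n_workers) {
  std::vector<ToyParScanState*> states;
  for (unsigned i = 0; i < n_workers; i++) {
    states.push_back(new ToyParScanState(i));
  }
  return states;
}

int main() {
  std::vector<ToyParScanState*> states = new_par_scan_states(4);
  std::printf("created %zu per-worker states\n", states.size());
  for (ToyParScanState* s : states) delete s;
  return 0;
}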
 689   // Allocate the highest free region in the reserved heap. This will commit
 690   // regions as necessary.
 691   HeapRegion* alloc_highest_free_region();
 692 
 693   // Frees a non-humongous region by initializing its contents and
 694   // adding it to the free list that's passed as a parameter (this is
 695   // usually a local list which will be appended to the master free
  696   // list later).
  697   // If par is true, the region's RSet will not be freed up. The
  698   // assumption is that this will be done later.
 699   // The locked parameter indicates if the caller has already taken
 700   // care of proper synchronization. This may allow some optimizations.
 701   void free_region(HeapRegion* hr,
 702                    FreeRegionList* free_list,
 703                    bool par,
 704                    bool locked = false);
 705 
 706   // It dirties the cards that cover the block so that the post
 707   // write barrier never queues anything when updating objects on this
 708   // block. It is assumed (and in fact we assert) that the block


 778   // Heap_lock when we enter this method, we will pass the
 779   // gc_count_before (i.e., total_collections()) as a parameter since
 780   // it has to be read while holding the Heap_lock. Currently, both
 781   // methods that call do_collection_pause() release the Heap_lock
 782   // before the call, so it's easy to read gc_count_before just before.
 783   HeapWord* do_collection_pause(size_t         word_size,
 784                                 uint           gc_count_before,
 785                                 bool*          succeeded,
 786                                 GCCause::Cause gc_cause);
 787 
 788   void wait_for_root_region_scanning();
 789 
 790   // The guts of the incremental collection pause, executed by the vm
 791   // thread. It returns false if it is unable to do the collection due
 792   // to the GC locker being active, true otherwise
 793   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 794 
 795   // Actually do the work of evacuating the collection set.
 796   void evacuate_collection_set(EvacuationInfo& evacuation_info);
 797   
 798   // Print the header for the per-thread termination statistics.
 799   static void print_termination_stats_hdr(outputStream* const st);
 800   // Print actual per-thread termination statistics.
 801   void print_termination_stats(outputStream* const st,
 802                                uint worker_id,
 803                                double elapsed_ms,
 804                                double strong_roots_ms,
 805                                double term_ms,
 806                                size_t term_attempts,
 807                                size_t alloc_buffer_waste,
 808                                size_t undo_waste) const;
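The two declarations above print a per-worker termination statistics table: one header line followed by one row per worker. A minimal printf-based sketch with the same kinds of columns; the column layout is illustrative and not the exact HotSpot output format.

#include <cstdio>

// Illustrative header/row pair for a per-worker termination statistics table;
// the column set mirrors the parameters above, the exact layout does not.
void print_termination_stats_hdr_sketch() {
  std::printf("%3s %9s %9s %9s %8s %10s %10s\n",
              "thr", "elapsed", "roots", "term", "attempts", "plab-waste", "undo-waste");
}

void print_termination_stats_sketch(unsigned worker_id, double elapsed_ms,
                                    double strong_roots_ms, double term_ms,
                                    size_t term_attempts, size_t alloc_buffer_waste,
                                    size_t undo_waste) {
  std::printf("%3u %9.2f %9.2f %9.2f %8zu %10zu %10zu\n",
              worker_id, elapsed_ms, strong_roots_ms, term_ms,
              term_attempts, alloc_buffer_waste, undo_waste);
}

int main() {
  print_termination_stats_hdr_sketch();
  print_termination_stats_sketch(0, 12.34, 8.10, 0.42, 3, 128, 16);
  return 0;
}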
 809   // Update object copying statistics.
 810   void record_obj_copy_mem_stats();
 811   
 812   // The g1 remembered set of the heap.
 813   G1RemSet* _g1_rem_set;
 814 
 815   // A set of cards that cover the objects for which the Rsets should be updated
 816   // concurrently after the collection.
 817   DirtyCardQueueSet _dirty_card_queue_set;
 818 
 819   // The closure used to refine a single card.
 820   RefineCardTableEntryClosure* _refine_cte_cl;
 821 
 822   // A DirtyCardQueueSet that is used to hold cards that contain
 823   // references into the current collection set. This is used to
 824   // update the remembered sets of the regions in the collection
 825   // set in the event of an evacuation failure.
 826   DirtyCardQueueSet _into_cset_dirty_card_queue_set;
 827 
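Cards containing references into the collection set are queued separately so that the affected remembered sets can be rebuilt if evacuation fails. A toy queue of card indices with an enqueue-then-drain usage, purely illustrative and not the real DirtyCardQueueSet:

#include <cstdio>
#include <deque>

// Toy dirty-card queue holding card indices; illustrative only.
std::deque<size_t> into_cset_dirty_cards;

// Record a card that holds a reference into the collection set.
void enqueue_into_cset_card(size_t card_index) {
  into_cset_dirty_cards.push_back(card_index);
}

// On evacuation failure, drain the queue and "update" the remembered sets
// for the recorded cards (here just counted).
size_t drain_into_cset_cards() {
  size_t updated = 0;
  while (!into_cset_dirty_cards.empty()) {
    into_cset_dirty_cards.pop_front();
    updated++;
  }
  return updated;
}

int main() {
  enqueue_into_cset_card(42);
  enqueue_into_cset_card(43);
  std::printf("updated rem sets from %zu cards\n", drain_into_cset_cards());
  return 0;
}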
 828   // After a collection pause, make the regions in the CS into free

