< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.hpp

Print this page
rev 49912 : imported patch 8201492-properly-implement-non-contiguous-reference-processing


  90 class WorkGang;
  91 class G1Allocator;
  92 class G1ArchiveAllocator;
  93 class G1FullGCScope;
  94 class G1HeapVerifier;
  95 class G1HeapSizingPolicy;
  96 class G1HeapSummary;
  97 class G1EvacSummary;
  98 
  99 typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
 100 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
 101 
 102 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 103 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
 104 
 105 // The G1 STW is alive closure.
 106 // An instance is embedded into the G1CH and used as the
 107 // (optional) _is_alive_non_header closure in the STW
 108 // reference processor. It is also extensively used during
 109 // reference processing during STW evacuation pauses.
 110 class G1STWIsAliveClosure: public BoolObjectClosure {
 111   G1CollectedHeap* _g1h; // Back-reference to the owning heap; not owned by this closure.
 112 public:
 113   G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 114   bool do_object_b(oop p); // Liveness predicate; body not visible in this diff.
 115 };
 116 







// NOTE(review): listener for heap-region mapping (commit) changes; on commit
// it apparently resets the corresponding entries of the from-card cache (see
// reset_from_card_cache) -- exact semantics live in the .cpp, not this diff.
 117 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 118  private:
 119   void reset_from_card_cache(uint start_idx, size_t num_regions);
 120  public:
 121   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 122 };
 123 
 124 class G1CollectedHeap : public CollectedHeap {
 125   friend class G1FreeCollectionSetTask;
 126   friend class VM_CollectForMetadataAllocation;
 127   friend class VM_G1CollectForAllocation;
 128   friend class VM_G1CollectFull;
 129   friend class VMStructs;
 130   friend class MutatorAllocRegion;
 131   friend class G1FullCollector;
 132   friend class G1GCAllocRegion;
 133   friend class G1HeapVerifier;
 134 
 135   // Closures used in implementation.
 136   friend class G1ParScanThreadState;


 485   void verify_before_full_collection(bool explicit_gc);
 486   void prepare_heap_for_full_collection();
 487   void prepare_heap_for_mutators();
 488   void abort_refinement();
 489   void verify_after_full_collection();
 490   void print_heap_after_full_collection(G1HeapTransition* heap_transition);
 491 
 492   // Helper method for satisfy_failed_allocation()
 493   HeapWord* satisfy_failed_allocation_helper(size_t word_size,
 494                                              bool do_gc,
 495                                              bool clear_all_soft_refs,
 496                                              bool expect_null_mutator_alloc_region,
 497                                              bool* gc_succeeded);
 498 
 499   // Attempting to expand the heap sufficiently
 500   // to support an allocation of the given "word_size".  If
 501   // successful, perform the allocation and return the address of the
 502   // allocated block, or else "NULL".
 503   HeapWord* expand_and_allocate(size_t word_size);
 504 
 505   // Preserve any referents discovered by concurrent marking that have not yet been
 506   // copied by the STW pause.
 507   void preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states);
 508   // Process any reference objects discovered during
 509   // an incremental evacuation pause.
 510   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 511 
 512   // Enqueue any remaining discovered references
 513   // after processing.
 514   void enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 515 
 516   // Merges the information gathered on a per-thread basis for all worker threads
 517   // during GC into global variables.
 518   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 519 public:
 520   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 521 
 522   WorkGang* workers() const { return _workers; }
 523 
 524   G1Allocator* allocator() {
 525     return _allocator;
 526   }
 527 


 876   //    lists (also checked as a precondition during initial marking).
 877 
 878   // The (stw) reference processor...
 879   ReferenceProcessor* _ref_processor_stw;
 880 
 881   // During reference object discovery, the _is_alive_non_header
 882   // closure (if non-null) is applied to the referent object to
 883   // determine whether the referent is live. If so then the
 884   // reference object does not need to be 'discovered' and can
 885   // be treated as a regular oop. This has the benefit of reducing
 886   // the number of 'discovered' reference objects that need to
 887   // be processed.
 888   //
 889   // Instance of the is_alive closure for embedding into the
 890   // STW reference processor as the _is_alive_non_header field.
 891   // Supplying a value for the _is_alive_non_header field is
 892   // optional but doing so prevents unnecessary additions to
 893   // the discovered lists during reference discovery.
 894   G1STWIsAliveClosure _is_alive_closure_stw;
 895 


 896   // The (concurrent marking) reference processor...
 897   ReferenceProcessor* _ref_processor_cm;
 898 
 899   // Instance of the concurrent mark is_alive closure for embedding
 900   // into the Concurrent Marking reference processor as the
 901   // _is_alive_non_header field. Supplying a value for the
 902   // _is_alive_non_header field is optional but doing so prevents
 903   // unnecessary additions to the discovered lists during reference
 904   // discovery.
 905   G1CMIsAliveClosure _is_alive_closure_cm;
 906 

 907 public:
 908 
 909   RefToScanQueue *task_queue(uint i) const;
 910 
 911   uint num_task_queues() const;
 912 
 913   // A set of cards where updates happened during the GC
 914   DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
 915 
 916   // Create a G1CollectedHeap with the specified policy.
 917   // Must call the initialize method afterwards.
 918   // May not return if something goes wrong.
 919   G1CollectedHeap(G1CollectorPolicy* policy);
 920 
 921 private:
 922   jint initialize_concurrent_refinement();
 923   jint initialize_young_gen_sampling_thread();
 924 public:
 925   // Initialize the G1CollectedHeap to have the initial and
 926   // maximum sizes and remembered and barrier sets




  90 class WorkGang;
  91 class G1Allocator;
  92 class G1ArchiveAllocator;
  93 class G1FullGCScope;
  94 class G1HeapVerifier;
  95 class G1HeapSizingPolicy;
  96 class G1HeapSummary;
  97 class G1EvacSummary;
  98 
  99 typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
 100 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
 101 
 102 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 103 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
 104 
 105 // The G1 STW is alive closure.
 106 // An instance is embedded into the G1CH and used as the
 107 // (optional) _is_alive_non_header closure in the STW
 108 // reference processor. It is also extensively used during
 109 // reference processing during STW evacuation pauses.
 110 class G1STWIsAliveClosure : public BoolObjectClosure {
 111   G1CollectedHeap* _g1h; // Back-reference to the owning heap; not owned by this closure.
 112 public:
 113   G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 114   bool do_object_b(oop p); // Liveness predicate; body not visible in this diff.
 115 };
 116 
// NOTE(review): new closure introduced by this patch (8201492,
// non-contiguous reference processing). Embedded into the STW reference
// processor as its "subject to discovery" predicate, i.e. it decides whether
// a given Reference object should be considered for discovery at all --
// presumably based on which heap region contains it; confirm in the .cpp.
// Please add a header comment here mirroring the one on G1STWIsAliveClosure.
 117 class G1STWSubjectToDiscoveryClosure : public BoolObjectClosure {
 118   G1CollectedHeap* _g1h; // Back-reference to the owning heap; not owned by this closure.
 119 public:
 120   G1STWSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 121   bool do_object_b(oop p); // Predicate body not visible in this diff.
 122 };
 123 
// NOTE(review): listener for heap-region mapping (commit) changes; on commit
// it apparently resets the corresponding entries of the from-card cache (see
// reset_from_card_cache) -- exact semantics live in the .cpp, not this diff.
 124 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 125  private:
 126   void reset_from_card_cache(uint start_idx, size_t num_regions);
 127  public:
 128   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 129 };
 130 
 131 class G1CollectedHeap : public CollectedHeap {
 132   friend class G1FreeCollectionSetTask;
 133   friend class VM_CollectForMetadataAllocation;
 134   friend class VM_G1CollectForAllocation;
 135   friend class VM_G1CollectFull;
 136   friend class VMStructs;
 137   friend class MutatorAllocRegion;
 138   friend class G1FullCollector;
 139   friend class G1GCAllocRegion;
 140   friend class G1HeapVerifier;
 141 
 142   // Closures used in implementation.
 143   friend class G1ParScanThreadState;


 492   void verify_before_full_collection(bool explicit_gc);
 493   void prepare_heap_for_full_collection();
 494   void prepare_heap_for_mutators();
 495   void abort_refinement();
 496   void verify_after_full_collection();
 497   void print_heap_after_full_collection(G1HeapTransition* heap_transition);
 498 
 499   // Helper method for satisfy_failed_allocation()
 500   HeapWord* satisfy_failed_allocation_helper(size_t word_size,
 501                                              bool do_gc,
 502                                              bool clear_all_soft_refs,
 503                                              bool expect_null_mutator_alloc_region,
 504                                              bool* gc_succeeded);
 505 
 506   // Attempting to expand the heap sufficiently
 507   // to support an allocation of the given "word_size".  If
 508   // successful, perform the allocation and return the address of the
 509   // allocated block, or else "NULL".
 510   HeapWord* expand_and_allocate(size_t word_size);
 511 



 512   // Process any reference objects discovered during
 513   // an incremental evacuation pause.
 514   void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 515 
 516   // Enqueue any remaining discovered references
 517   // after processing.
 518   void enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states);
 519 
 520   // Merges the information gathered on a per-thread basis for all worker threads
 521   // during GC into global variables.
 522   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
 523 public:
 524   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 525 
 526   WorkGang* workers() const { return _workers; }
 527 
 528   G1Allocator* allocator() {
 529     return _allocator;
 530   }
 531 


 880   //    lists (also checked as a precondition during initial marking).
 881 
 882   // The (stw) reference processor...
 883   ReferenceProcessor* _ref_processor_stw;
 884 
 885   // During reference object discovery, the _is_alive_non_header
 886   // closure (if non-null) is applied to the referent object to
 887   // determine whether the referent is live. If so then the
 888   // reference object does not need to be 'discovered' and can
 889   // be treated as a regular oop. This has the benefit of reducing
 890   // the number of 'discovered' reference objects that need to
 891   // be processed.
 892   //
 893   // Instance of the is_alive closure for embedding into the
 894   // STW reference processor as the _is_alive_non_header field.
 895   // Supplying a value for the _is_alive_non_header field is
 896   // optional but doing so prevents unnecessary additions to
 897   // the discovered lists during reference discovery.
 898   G1STWIsAliveClosure _is_alive_closure_stw;
 899 
 900   G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;
 901
 902   // The (concurrent marking) reference processor...
 903   ReferenceProcessor* _ref_processor_cm;
 904 
 905   // Instance of the concurrent mark is_alive closure for embedding
 906   // into the Concurrent Marking reference processor as the
 907   // _is_alive_non_header field. Supplying a value for the
 908   // _is_alive_non_header field is optional but doing so prevents
 909   // unnecessary additions to the discovered lists during reference
 910   // discovery.
 911   G1CMIsAliveClosure _is_alive_closure_cm;
 912 
 913   G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
 914 public:
 915 
 916   RefToScanQueue *task_queue(uint i) const;
 917 
 918   uint num_task_queues() const;
 919 
 920   // A set of cards where updates happened during the GC
 921   DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
 922 
 923   // Create a G1CollectedHeap with the specified policy.
 924   // Must call the initialize method afterwards.
 925   // May not return if something goes wrong.
 926   G1CollectedHeap(G1CollectorPolicy* policy);
 927 
 928 private:
 929   jint initialize_concurrent_refinement();
 930   jint initialize_young_gen_sampling_thread();
 931 public:
 932   // Initialize the G1CollectedHeap to have the initial and
 933   // maximum sizes and remembered and barrier sets


< prev index next >