class G1EvacSummary;

typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;

typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )

// The G1 STW is alive closure.
// An instance is embedded into the G1CH and used as the
// (optional) _is_alive_non_header closure in the STW
// reference processor. It is also extensively used during
// reference processing in STW evacuation pauses.
class G1STWIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
public:
  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  bool do_object_b(oop p);
};
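
// A minimal sketch of how do_object_b might be implemented (an assumption;
// the real definition lives in the implementation file, not in this header).
// During an STW pause an object can be treated as alive if it is outside the
// collection set, or inside it and already forwarded (i.e. copied);
// is_in_cset() and is_forwarded() are assumed helpers here.
//
//   bool G1STWIsAliveClosure::do_object_b(oop p) {
//     return !_g1->is_in_cset(p) || p->is_forwarded();
//   }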

class RefineCardTableEntryClosure;

class G1RegionMappingChangedListener : public G1MappingChangedListener {
 private:
  void reset_from_card_cache(uint start_idx, size_t num_regions);
 public:
  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};

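// A plausible sketch of the listener callback (an assumption, not copied from
// the implementation file): when a range of regions is newly committed, the
// from-card-cache entries covering those regions need to be invalidated, so
// on_commit would simply forward to reset_from_card_cache. The zero_filled
// flag offers no shortcut here because the card cache is not the memory that
// was actually committed.
//
//   void G1RegionMappingChangedListener::on_commit(uint start_idx,
//                                                  size_t num_regions,
//                                                  bool zero_filled) {
//     reset_from_card_cache(start_idx, num_regions);
//   }
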
class G1CollectedHeap : public CollectedHeap {
  friend class G1FreeCollectionSetTask;
  friend class VM_CollectForMetadataAllocation;
  friend class VM_G1CollectForAllocation;
  friend class VM_G1CollectFull;
  friend class VM_G1IncCollectionPause;
  friend class VMStructs;
  friend class MutatorAllocRegion;
  friend class G1GCAllocRegion;
  friend class G1HeapVerifier;

  // Closures used in implementation.
  friend class G1ParScanThreadState;

  // ...

  void print_termination_stats(uint worker_id,
                               double elapsed_ms,
                               double strong_roots_ms,
                               double term_ms,
                               size_t term_attempts,
                               size_t alloc_buffer_waste,
                               size_t undo_waste) const;
  // Update object copying statistics.
  void record_obj_copy_mem_stats();

  // The hot card cache for remembered set insertion optimization.
  G1HotCardCache* _hot_card_cache;

  // The G1 remembered set of the heap.
  G1RemSet* _g1_rem_set;

  // A set of cards that cover the objects for which the remembered sets
  // should be updated concurrently after the collection.
  DirtyCardQueueSet _dirty_card_queue_set;

  // The closure used to refine a single card.
  RefineCardTableEntryClosure* _refine_cte_cl;

  // After a collection pause, convert the regions in the collection set into
  // free regions.
  void free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);

  // Abandon the current collection set without recording policy
  // statistics or updating free lists.
  void abandon_collection_set(G1CollectionSet* collection_set);

  // The concurrent marker (and the thread it runs in).
  G1ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmThread;

  // The concurrent refiner.
  ConcurrentG1Refine* _cg1r;

  // The parallel task queues.
  RefToScanQueueSet *_task_queues;

  // True iff an evacuation has failed in the current collection.
  bool _evacuation_failed;

  // ...

  // Instance of the is_alive closure for embedding into the
  // STW reference processor as the _is_alive_non_header field.
  // Supplying a value for the _is_alive_non_header field is
  // optional but doing so prevents unnecessary additions to
  // the discovered lists during reference discovery.
  G1STWIsAliveClosure _is_alive_closure_stw;

  // The (concurrent marking) reference processor...
  ReferenceProcessor* _ref_processor_cm;

  // Instance of the concurrent mark is_alive closure for embedding
  // into the Concurrent Marking reference processor as the
  // _is_alive_non_header field. Supplying a value for the
  // _is_alive_non_header field is optional but doing so prevents
  // unnecessary additions to the discovered lists during reference
  // discovery.
  G1CMIsAliveClosure _is_alive_closure_cm;

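  // Why supplying _is_alive_non_header helps (a schematic sketch, not the
  // actual ReferenceProcessor code): during reference discovery the processor
  // can consult the closure and skip any reference whose referent it already
  // knows to be reachable, so that reference never gets added to a discovered
  // list only to be filtered out later.
  //
  //   if (is_alive_non_header != NULL &&
  //       is_alive_non_header->do_object_b(referent)) {
  //     return false;  // referent known to be alive; do not discover
  //   }
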
  volatile bool _free_regions_coming;

public:

  void set_refine_cte_cl_concurrency(bool concurrent);

  RefToScanQueue *task_queue(uint i) const;

  uint num_task_queues() const;

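  // A likely shape for the queue accessor (an assumption; the actual
  // definition is in the implementation file): each worker's reference scan
  // queue is simply looked up in the shared RefToScanQueueSet.
  //
  //   RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
  //     return _task_queues->queue(i);
  //   }
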
  // A set of cards where updates happened during the GC.
  DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }

  // Create a G1CollectedHeap with the specified policy.
  // Must call the initialize method afterwards.
  // May not return if something goes wrong.
  G1CollectedHeap(G1CollectorPolicy* policy);

private:
  jint initialize_concurrent_refinement();
public:
  // Initialize the G1CollectedHeap to have the initial and
  // maximum sizes and remembered and barrier sets
  // specified by the policy object.
  jint initialize();
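
  // Schematic creation sequence (a hypothetical call site; the real one is in
  // the VM's heap-initialization code): the heap is constructed with a
  // collector policy and is only usable after initialize() returns JNI_OK.
  //
  //   G1CollectedHeap* g1h = new G1CollectedHeap(policy);
  //   jint status = g1h->initialize();
  //   if (status != JNI_OK) {
  //     // Heap creation failed; the VM cannot continue starting up.
  //   }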