 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24
25 #ifndef SHARE_GC_G1_G1COLLECTEDHEAP_HPP
26 #define SHARE_GC_G1_G1COLLECTEDHEAP_HPP
27
28 #include "gc/g1/g1BarrierSet.hpp"
29 #include "gc/g1/g1BiasedArray.hpp"
30 #include "gc/g1/g1CardTable.hpp"
31 #include "gc/g1/g1CollectionSet.hpp"
32 #include "gc/g1/g1CollectorState.hpp"
33 #include "gc/g1/g1ConcurrentMark.hpp"
34 #include "gc/g1/g1EdenRegions.hpp"
35 #include "gc/g1/g1EvacFailure.hpp"
36 #include "gc/g1/g1EvacStats.hpp"
37 #include "gc/g1/g1EvacuationInfo.hpp"
38 #include "gc/g1/g1GCPhaseTimes.hpp"
39 #include "gc/g1/g1HeapTransition.hpp"
40 #include "gc/g1/g1HeapVerifier.hpp"
41 #include "gc/g1/g1HRPrinter.hpp"
42 #include "gc/g1/g1InCSetState.hpp"
43 #include "gc/g1/g1MonitoringSupport.hpp"
44 #include "gc/g1/g1SurvivorRegions.hpp"
45 #include "gc/g1/g1YCTypes.hpp"
46 #include "gc/g1/heapRegionManager.hpp"
47 #include "gc/g1/heapRegionSet.hpp"
48 #include "gc/g1/heterogeneousHeapRegionManager.hpp"
49 #include "gc/shared/barrierSet.hpp"
50 #include "gc/shared/collectedHeap.hpp"
51 #include "gc/shared/gcHeapSummary.hpp"
52 #include "gc/shared/plab.hpp"
53 #include "gc/shared/preservedMarks.hpp"
741 static void print_termination_stats_hdr();
742 // Print actual per-thread termination statistics.
743 void print_termination_stats(uint worker_id,
744 double elapsed_ms,
745 double strong_roots_ms,
746 double term_ms,
747 size_t term_attempts,
748 size_t alloc_buffer_waste,
749 size_t undo_waste) const;
750 // Update object copying statistics.
751 void record_obj_copy_mem_stats();
752
753 // The hot card cache for remembered set insertion optimization.
754 G1HotCardCache* _hot_card_cache;
755
756 // The g1 remembered set of the heap.
757 G1RemSet* _g1_rem_set;
758
759 // A set of cards that cover the objects for which the Rsets should be updated
760 // concurrently after the collection.
761 DirtyCardQueueSet _dirty_card_queue_set;
762
763 // After a collection pause, convert the regions in the collection set into free
764 // regions.
765 void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
766
767 // Abandon the current collection set without recording policy
768 // statistics or updating free lists.
769 void abandon_collection_set(G1CollectionSet* collection_set);
770
771 // The concurrent marker (and the thread it runs in.)
772 G1ConcurrentMark* _cm;
773 G1ConcurrentMarkThread* _cm_thread;
774
775 // The concurrent refiner.
776 G1ConcurrentRefine* _cr;
777
778 // The parallel task queues
779 RefToScanQueueSet *_task_queues;
780
781 // True iff a evacuation has failed in the current collection.
901
902 // The (concurrent marking) reference processor...
903 ReferenceProcessor* _ref_processor_cm;
904
905 // Instance of the concurrent mark is_alive closure for embedding
906 // into the Concurrent Marking reference processor as the
907 // _is_alive_non_header field. Supplying a value for the
908 // _is_alive_non_header field is optional but doing so prevents
909 // unnecessary additions to the discovered lists during reference
910 // discovery.
911 G1CMIsAliveClosure _is_alive_closure_cm;
912
913 G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
914 public:
915
916 RefToScanQueue *task_queue(uint i) const;
917
918 uint num_task_queues() const;
919
920 // A set of cards where updates happened during the GC
// Accessor: mutable reference to the heap's dirty card queue set
// (the _dirty_card_queue_set member declared earlier in this class).
921 DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
922
923 // Create a G1CollectedHeap with the specified policy.
924 // Must call the initialize method afterwards.
925 // May not return if something goes wrong.
926 G1CollectedHeap(G1CollectorPolicy* policy);
927
928 private:
929 jint initialize_concurrent_refinement();
930 jint initialize_young_gen_sampling_thread();
931 public:
932 // Initialize the G1CollectedHeap to have the initial and
933 // maximum sizes and remembered and barrier sets
934 // specified by the policy object.
935 jint initialize();
936
937 virtual void stop();
938 virtual void safepoint_synchronize_begin();
939 virtual void safepoint_synchronize_end();
940
941 // Return the (conservative) maximum heap alignment for any G1 heap
// Const and non-const accessors for the heap's collection set;
// both overloads return a pointer to the same _collection_set member.
966 const G1CollectionSet* collection_set() const { return &_collection_set; }
967 G1CollectionSet* collection_set() { return &_collection_set; }
968
969 virtual CollectorPolicy* collector_policy() const;
970 virtual G1CollectorPolicy* g1_collector_policy() const;
971
972 virtual SoftRefPolicy* soft_ref_policy();
973
974 virtual void initialize_serviceability();
975 virtual MemoryUsage memory_usage();
976 virtual GrowableArray<GCMemoryManager*> memory_managers();
977 virtual GrowableArray<MemoryPool*> memory_pools();
978
979 // The rem set and barrier set.
// Accessor for the G1 remembered set (_g1_rem_set member).
980 G1RemSet* g1_rem_set() const { return _g1_rem_set; }
981
982 // Try to minimize the remembered set.
983 void scrub_rem_set();
984
985 // Apply the given closure on all cards in the Hot Card Cache, emptying it.
986 void iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i);
987
988 // Apply the given closure on all cards in the Dirty Card Queue Set, emptying it.
989 void iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i);
990
991 // The shared block offset table array.
// Accessor for the shared block offset table (_bot member).
992 G1BlockOffsetTable* bot() const { return _bot; }
993
994 // Reference Processing accessors
995
996 // The STW reference processor....
// Accessor for the stop-the-world reference processor (_ref_processor_stw).
997 ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
998
999 G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }
1000
1001 // The Concurrent Marking reference processor...
// Accessor for the concurrent-marking reference processor (_ref_processor_cm).
1002 ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
1003
1004 size_t unused_committed_regions_in_bytes() const;
1005 virtual size_t capacity() const;
1006 virtual size_t used() const;
1007 // This should be called when we're not holding the heap lock. The
1008 // result might be a bit inaccurate.
1009 size_t used_unlocked() const;
|
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24
25 #ifndef SHARE_GC_G1_G1COLLECTEDHEAP_HPP
26 #define SHARE_GC_G1_G1COLLECTEDHEAP_HPP
27
28 #include "gc/g1/g1BarrierSet.hpp"
29 #include "gc/g1/g1BiasedArray.hpp"
30 #include "gc/g1/g1CardTable.hpp"
31 #include "gc/g1/g1CollectionSet.hpp"
32 #include "gc/g1/g1CollectorState.hpp"
33 #include "gc/g1/g1ConcurrentMark.hpp"
34 #include "gc/g1/g1DirtyCardQueue.hpp"
35 #include "gc/g1/g1EdenRegions.hpp"
36 #include "gc/g1/g1EvacFailure.hpp"
37 #include "gc/g1/g1EvacStats.hpp"
38 #include "gc/g1/g1EvacuationInfo.hpp"
39 #include "gc/g1/g1GCPhaseTimes.hpp"
40 #include "gc/g1/g1HeapTransition.hpp"
41 #include "gc/g1/g1HeapVerifier.hpp"
42 #include "gc/g1/g1HRPrinter.hpp"
43 #include "gc/g1/g1InCSetState.hpp"
44 #include "gc/g1/g1MonitoringSupport.hpp"
45 #include "gc/g1/g1SurvivorRegions.hpp"
46 #include "gc/g1/g1YCTypes.hpp"
47 #include "gc/g1/heapRegionManager.hpp"
48 #include "gc/g1/heapRegionSet.hpp"
49 #include "gc/g1/heterogeneousHeapRegionManager.hpp"
50 #include "gc/shared/barrierSet.hpp"
51 #include "gc/shared/collectedHeap.hpp"
52 #include "gc/shared/gcHeapSummary.hpp"
53 #include "gc/shared/plab.hpp"
54 #include "gc/shared/preservedMarks.hpp"
742 static void print_termination_stats_hdr();
743 // Print actual per-thread termination statistics.
744 void print_termination_stats(uint worker_id,
745 double elapsed_ms,
746 double strong_roots_ms,
747 double term_ms,
748 size_t term_attempts,
749 size_t alloc_buffer_waste,
750 size_t undo_waste) const;
751 // Update object copying statistics.
752 void record_obj_copy_mem_stats();
753
754 // The hot card cache for remembered set insertion optimization.
755 G1HotCardCache* _hot_card_cache;
756
757 // The g1 remembered set of the heap.
758 G1RemSet* _g1_rem_set;
759
760 // A set of cards that cover the objects for which the Rsets should be updated
761 // concurrently after the collection.
762 G1DirtyCardQueueSet _dirty_card_queue_set;
763
764 // After a collection pause, convert the regions in the collection set into free
765 // regions.
766 void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
767
768 // Abandon the current collection set without recording policy
769 // statistics or updating free lists.
770 void abandon_collection_set(G1CollectionSet* collection_set);
771
772 // The concurrent marker (and the thread it runs in.)
773 G1ConcurrentMark* _cm;
774 G1ConcurrentMarkThread* _cm_thread;
775
776 // The concurrent refiner.
777 G1ConcurrentRefine* _cr;
778
779 // The parallel task queues
780 RefToScanQueueSet *_task_queues;
781
782 // True iff a evacuation has failed in the current collection.
902
903 // The (concurrent marking) reference processor...
904 ReferenceProcessor* _ref_processor_cm;
905
906 // Instance of the concurrent mark is_alive closure for embedding
907 // into the Concurrent Marking reference processor as the
908 // _is_alive_non_header field. Supplying a value for the
909 // _is_alive_non_header field is optional but doing so prevents
910 // unnecessary additions to the discovered lists during reference
911 // discovery.
912 G1CMIsAliveClosure _is_alive_closure_cm;
913
914 G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
915 public:
916
917 RefToScanQueue *task_queue(uint i) const;
918
919 uint num_task_queues() const;
920
921 // A set of cards where updates happened during the GC
// Accessor: mutable reference to the heap's dirty card queue set
// (the _dirty_card_queue_set member declared earlier in this class).
922 G1DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
923
924 // Create a G1CollectedHeap with the specified policy.
925 // Must call the initialize method afterwards.
926 // May not return if something goes wrong.
927 G1CollectedHeap(G1CollectorPolicy* policy);
928
929 private:
930 jint initialize_concurrent_refinement();
931 jint initialize_young_gen_sampling_thread();
932 public:
933 // Initialize the G1CollectedHeap to have the initial and
934 // maximum sizes and remembered and barrier sets
935 // specified by the policy object.
936 jint initialize();
937
938 virtual void stop();
939 virtual void safepoint_synchronize_begin();
940 virtual void safepoint_synchronize_end();
941
942 // Return the (conservative) maximum heap alignment for any G1 heap
// Const and non-const accessors for the heap's collection set;
// both overloads return a pointer to the same _collection_set member.
967 const G1CollectionSet* collection_set() const { return &_collection_set; }
968 G1CollectionSet* collection_set() { return &_collection_set; }
969
970 virtual CollectorPolicy* collector_policy() const;
971 virtual G1CollectorPolicy* g1_collector_policy() const;
972
973 virtual SoftRefPolicy* soft_ref_policy();
974
975 virtual void initialize_serviceability();
976 virtual MemoryUsage memory_usage();
977 virtual GrowableArray<GCMemoryManager*> memory_managers();
978 virtual GrowableArray<MemoryPool*> memory_pools();
979
980 // The rem set and barrier set.
// Accessor for the G1 remembered set (_g1_rem_set member).
981 G1RemSet* g1_rem_set() const { return _g1_rem_set; }
982
983 // Try to minimize the remembered set.
984 void scrub_rem_set();
985
986 // Apply the given closure on all cards in the Hot Card Cache, emptying it.
987 void iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i);
988
989 // Apply the given closure on all cards in the Dirty Card Queue Set, emptying it.
990 void iterate_dirty_card_closure(G1CardTableEntryClosure* cl, uint worker_i);
991
992 // The shared block offset table array.
// Accessor for the shared block offset table (_bot member).
993 G1BlockOffsetTable* bot() const { return _bot; }
994
995 // Reference Processing accessors
996
997 // The STW reference processor....
// Accessor for the stop-the-world reference processor (_ref_processor_stw).
998 ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
999
1000 G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }
1001
1002 // The Concurrent Marking reference processor...
// Accessor for the concurrent-marking reference processor (_ref_processor_cm).
1003 ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
1004
1005 size_t unused_committed_regions_in_bytes() const;
1006 virtual size_t capacity() const;
1007 virtual size_t used() const;
1008 // This should be called when we're not holding the heap lock. The
1009 // result might be a bit inaccurate.
1010 size_t used_unlocked() const;
|