  HeapRegion* first_region() { return _head; }
  HeapRegion* first_survivor_region() { return _survivor_head; }
  HeapRegion* last_survivor_region() { return _survivor_tail; }

  // debugging
  bool check_list_well_formed();
  bool check_list_empty(bool check_sample = true);
  void print();
};

class MutatorAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  MutatorAllocRegion()
    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
};

// The G1 STW is-alive closure.
// An instance is embedded into the G1CollectedHeap and used as the
// (optional) _is_alive_non_header closure in the STW
// reference processor. It is also extensively used during
// reference processing in STW evacuation pauses.
class G1STWIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
public:
  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  void do_object(oop p) { assert(false, "Do not call."); }
  bool do_object_b(oop p);
};
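
// A minimal usage sketch (hypothetical caller; in practice the STW
// reference processor invokes do_object_b() on candidate referents):
//
//   G1STWIsAliveClosure is_alive(g1h);
//   if (is_alive.do_object_b(referent)) {
//     // Referent is live: the Reference object need not be discovered
//     // and can be treated as a regular oop.
//   }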

class SurvivorGCAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  SurvivorGCAllocRegion()
    : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
};

class OldGCAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  OldGCAllocRegion()
    : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
};

class RefineCardTableEntryClosure;

class G1CollectedHeap : public SharedHeap {
  friend class VM_G1CollectForAllocation;
  friend class VM_GenCollectForPermanentAllocation;
  friend class VM_G1CollectFull;
  friend class VM_G1IncCollectionPause;
  friend class VMStructs;
  friend class MutatorAllocRegion;
  friend class SurvivorGCAllocRegion;
  friend class OldGCAllocRegion;

  // Closures used in implementation.
  friend class G1ParCopyHelper;
  friend class G1IsAliveClosure;
  friend class G1EvacuateFollowersClosure;
  friend class G1ParScanThreadState;
  friend class G1ParScanClosureSuper;
  friend class G1ParEvacuateFollowersClosure;
  friend class G1ParTask;
  friend class G1FreeGarbageRegionClosure;
  friend class RefineCardTableEntryClosure;

  // ... (other friends and private members elided) ...

  // Callback from VM_G1CollectFull operation.
  // Perform a full collection.
  void do_full_collection(bool clear_all_soft_refs);

  // Resize the heap if necessary after a full collection. If this is
  // after a collect-for-allocation, "word_size" is the allocation size,
  // and will be considered part of the used portion of the heap.
  void resize_if_necessary_after_full_collection(size_t word_size);

  // Callback from VM_G1CollectForAllocation operation.
  // This function does everything necessary/possible to satisfy a
  // failed allocation request (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);

  // Attempt to expand the heap sufficiently to support an allocation
  // of the given "word_size". If successful, perform the allocation
  // and return the address of the allocated block, or else "NULL".
  HeapWord* expand_and_allocate(size_t word_size);
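
  // A hedged sketch of the fallback order implied above (illustrative
  // only; the real caller is the VM_G1CollectForAllocation operation):
  //
  //   bool succeeded;
  //   HeapWord* result = satisfy_failed_allocation(word_size, &succeeded);
  //   // Internally this may run a collection and/or call
  //   // expand_and_allocate(word_size); "result" is NULL if neither
  //   // yields enough space.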

  // Process any reference objects discovered during
  // an incremental evacuation pause.
  void process_discovered_references();

  // Enqueue any remaining discovered references
  // after processing.
  void enqueue_discovered_references();
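
  // Hypothetical ordering at the tail of an evacuation pause (a sketch
  // for orientation, not the actual pause code):
  //
  //   // ... evacuate the collection set ...
  //   process_discovered_references();  // process STW-discovered refs
  //   // ... clean up, free the collection set ...
  //   enqueue_discovered_references();  // hand survivors to the pending list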

public:

  G1MonitoringSupport* g1mm() { return _g1mm; }

  // Expand the garbage-first heap by at least the given size (in bytes!).
  // Returns true if the heap was expanded by the requested amount;
  // false otherwise.
  // (Rounds up to a HeapRegion boundary.)
  bool expand(size_t expand_bytes);
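
  // Usage sketch (hypothetical caller):
  //
  //   size_t expand_bytes = 2 * HeapRegion::GrainBytes;  // example amount
  //   if (!expand(expand_bytes)) {
  //     // Expansion failed, e.g. the heap is already at maximum capacity.
  //   }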

  // Do anything common to GCs.
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

  // We register a region with the fast "in collection set" test. We
  // simply set to true the array slot corresponding to this region.
  void register_region_with_in_cset_fast_test(HeapRegion* r) {
    assert(_in_cset_fast_test_base != NULL, "sanity");
    assert(r->in_collection_set(), "invariant");
    size_t index = r->hrs_index();
    // ... (remainder of this method and other members elided) ...
  // Push "obj" on the scan stack.
  void push_on_evac_failure_scan_stack(oop obj);
  // Process scan stack entries until the stack is empty.
  void drain_evac_failure_scan_stack();
  // True iff an invocation of "drain_evac_failure_scan_stack" is in
  // progress; used to prevent unnecessary recursion.
  bool _drain_in_progress;

  // Do any necessary initialization for evacuation-failure handling.
  // "cl" is the closure that will be used to process evac-failure
  // objects.
  void init_for_evac_failure(OopsInHeapRegionClosure* cl);
  // Do any necessary cleanup for evacuation-failure handling data
  // structures.
  void finalize_for_evac_failure();

  // An attempt to evacuate "obj" has failed; take necessary steps.
  oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
  void handle_evacuation_failure_common(oop obj, markOop m);
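
  // Sketch of the handling sequence described above (illustrative; the
  // real driver is the evacuation pause code):
  //
  //   init_for_evac_failure(&cl);                        // "cl" processes evac-failure objects
  //   oop fwd = handle_evacuation_failure_par(&cl, obj); // obj is self-forwarded
  //   drain_evac_failure_scan_stack();                   // empty the scan stack
  //   finalize_for_evac_failure();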

  // ("Weak") Reference processing support.
  //
  // G1 has two instances of the reference processor class. One
  // (_ref_processor_cm) handles reference object discovery
  // and subsequent processing during concurrent marking cycles.
  //
  // The other (_ref_processor_stw) handles reference object
  // discovery and processing during full GCs and incremental
  // evacuation pauses.
  //
  // During an incremental pause, reference discovery will be
  // temporarily disabled for _ref_processor_cm and will be
  // enabled for _ref_processor_stw. At the end of the evacuation
  // pause references discovered by _ref_processor_stw will be
  // processed and discovery will be disabled. The previous
  // setting for reference object discovery for _ref_processor_cm
  // will be reinstated.
  //
  // At the start of marking:
  //  * Discovery by the CM ref processor is verified to be inactive
  //    and its discovered lists are empty.
  //  * Discovery by the CM ref processor is then enabled.
  //
  // At the end of marking:
  //  * Any references on the CM ref processor's discovered
  //    lists are processed (possibly MT).
  //
  // At the start of full GC we:
  //  * Disable discovery by the CM ref processor and
  //    empty the CM ref processor's discovered lists
  //    (without processing any entries).
  //  * Verify that the STW ref processor is inactive and its
  //    discovered lists are empty.
  //  * Temporarily set STW ref processor discovery as single threaded.
  //  * Temporarily clear the STW ref processor's _is_alive_non_header
  //    field.
  //  * Finally enable discovery by the STW ref processor.
  //
  // The STW ref processor is used to record any discovered
  // references during the full GC.
  //
  // At the end of a full GC we:
  //  * Enqueue any reference objects discovered by the STW ref processor
  //    that have non-live referents. This has the side-effect of
  //    making the STW ref processor inactive by disabling discovery.
  //  * Verify that the CM ref processor is still inactive
  //    and no references have been placed on its discovered
  //    lists (also checked as a precondition during initial marking).
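
  // The full-GC steps above, restated as a hedged pseudo-code sketch
  // (the ReferenceProcessor calls here are abbreviated, not verbatim):
  //
  //   ref_processor_cm()->disable_discovery();
  //   ref_processor_cm()->abandon_partial_discovery();  // drop CM's lists
  //   assert(!ref_processor_stw()->discovery_enabled(), "pre-condition");
  //   ref_processor_stw()->enable_discovery();          // record refs during full GC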

  // The (stw) reference processor...
  ReferenceProcessor* _ref_processor_stw;

  // During reference object discovery, the _is_alive_non_header
  // closure (if non-null) is applied to the referent object to
  // determine whether the referent is live. If so then the
  // reference object does not need to be 'discovered' and can
  // be treated as a regular oop. This has the benefit of reducing
  // the number of 'discovered' reference objects that need to
  // be processed.
  //
  // Instance of the is_alive closure for embedding into the
  // STW reference processor as the _is_alive_non_header field.
  // Supplying a value for the _is_alive_non_header field is
  // optional but doing so prevents unnecessary additions to
  // the discovered lists during reference discovery.
  G1STWIsAliveClosure _is_alive_closure_stw;

  // The (concurrent marking) reference processor...
  ReferenceProcessor* _ref_processor_cm;

  // Instance of the concurrent mark is_alive closure for embedding
  // into the Concurrent Marking reference processor as the
  // _is_alive_non_header field. Supplying a value for the
  // _is_alive_non_header field is optional but doing so prevents
  // unnecessary additions to the discovered lists during reference
  // discovery.
  G1CMIsAliveClosure _is_alive_closure_cm;
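
  // Roughly how discovery consults these closures (a sketch of the
  // ReferenceProcessor-side check, not a verbatim excerpt):
  //
  //   if (_is_alive_non_header != NULL &&
  //       _is_alive_non_header->do_object_b(referent)) {
  //     return false;  // live referent: skip discovery, treat as a normal oop
  //   }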

  enum G1H_process_strong_roots_tasks {
    G1H_PS_mark_stack_oops_do,
    G1H_PS_refProcessor_oops_do,
    // Leave this one last.
    G1H_PS_NumElements
  };

  SubTasksDone* _process_strong_tasks;
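
  // Each of these subtasks is claimed by at most one worker per parallel
  // phase, e.g. (hedged sketch of a worker body):
  //
  //   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
  //     // Exactly one worker gets here and walks the ref processor's oops.
  //   }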

  volatile bool _free_regions_coming;

public:

  SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }

  void set_refine_cte_cl_concurrency(bool concurrent);

  RefToScanQueue* task_queue(int i) const;

  // A set of cards where updates happened during the GC
  DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }

  // A DirtyCardQueueSet that is used to hold cards that contain
  // references into the current collection set. This is used to
  // update the remembered sets of the regions in the collection
  // set in the event of an evacuation failure.
  DirtyCardQueueSet& into_cset_dirty_card_queue_set()
        { return _into_cset_dirty_card_queue_set; }

  // Create a G1CollectedHeap with the specified policy.
  // Must call the initialize method afterwards.
  // May not return if something goes wrong.
  G1CollectedHeap(G1CollectorPolicy* policy);

  // Initialize the G1CollectedHeap to have the initial and
  // maximum sizes, permanent generation, and remembered and barrier sets
  // specified by the policy object.
  jint initialize();
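
  // Creation sketch (hypothetical; in the VM this happens during
  // universe initialization, not from application code):
  //
  //   G1CollectedHeap* g1h = new G1CollectedHeap(policy);
  //   jint status = g1h->initialize();
  //   if (status != JNI_OK) {
  //     // Heap reservation or ancillary structure setup failed.
  //   }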

  // Initialize weak reference processing.
  virtual void ref_processing_init();

  void set_par_threads(int t) {
    SharedHeap::set_par_threads(t);
    _process_strong_tasks->set_n_threads(t);
  }

  virtual CollectedHeap::Name kind() const {
    return CollectedHeap::G1CollectedHeap;
  }

  // The current policy object for the collector.
  G1CollectorPolicy* g1_policy() const { return _g1_policy; }

  // Adaptive size policy. No such thing for g1.
  virtual AdaptiveSizePolicy* size_policy() { return NULL; }

  // The rem set and barrier set.
  G1RemSet* g1_rem_set() const { return _g1_rem_set; }
  ModRefBarrierSet* mr_bs() const { return _mr_bs; }

  // ... (members elided) ...
  unsigned get_gc_time_stamp() {
    return _gc_time_stamp;
  }

  void reset_gc_time_stamp() {
    _gc_time_stamp = 0;
    OrderAccess::fence();
  }

  void increment_gc_time_stamp() {
    ++_gc_time_stamp;
    OrderAccess::fence();
  }

  void iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                  DirtyCardQueue* into_cset_dcq,
                                  bool concurrent, int worker_i);

  // The shared block offset table array.
  G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }

  // Reference Processing accessors

  // The STW reference processor...
  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }

  // The Concurrent Marking reference processor...
  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }

  virtual size_t capacity() const;
  virtual size_t used() const;
  // This should be called when we're not holding the heap lock. The
  // result might be a bit inaccurate.
  size_t used_unlocked() const;
  size_t recalculate_used() const;

  // These virtual functions do the actual allocation.
  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)
  // But G1CollectedHeap doesn't yet support this.

  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection or expansion activity. In a
  // generational collector, for example, this is probably the largest
  // allocation that could be supported (without expansion) in the youngest
  // generation. It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.