// Is-alive closure for stop-the-world (STW) phases (per its name).
// do_object_b() is declared here and defined out of line; it reports
// whether the given oop is considered live, consulting the heap via _g1.
193 class G1STWIsAliveClosure: public BoolObjectClosure {
194 G1CollectedHeap* _g1;  // owning heap, queried by do_object_b()
195 public:
196 G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
197 bool do_object_b(oop p);
198 };
199
200 // Instances of this class are used for quick tests on whether a reference points
201 // into the collection set. Each of the array's elements denotes whether the
202 // corresponding region is in the collection set.
203 class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<bool> {
204 protected:
205 bool default_value() const { return false; }  // regions are "not in the collection set" by default
206 public:
207 void clear() { G1BiasedMappedArray<bool>::clear(); }  // delegates to the base class clear()
208 };
209
210 class RefineCardTableEntryClosure;
211
212 class G1CollectedHeap : public SharedHeap {
213 friend class VM_G1CollectForAllocation;
214 friend class VM_G1CollectFull;
215 friend class VM_G1IncCollectionPause;
216 friend class VMStructs;
217 friend class MutatorAllocRegion;
218 friend class SurvivorGCAllocRegion;
219 friend class OldGCAllocRegion;
220
221 // Closures used in implementation.
222 template <G1Barrier barrier, bool do_mark_object>
223 friend class G1ParCopyClosure;
224 friend class G1IsAliveClosure;
225 friend class G1EvacuateFollowersClosure;
226 friend class G1ParScanThreadState;
227 friend class G1ParScanClosureSuper;
228 friend class G1ParEvacuateFollowersClosure;
229 friend class G1ParTask;
230 friend class G1FreeGarbageRegionClosure;
231 friend class RefineCardTableEntryClosure;
232 friend class G1PrepareCompactClosure;
233 friend class RegionSorter;
234 friend class RegionResetter;
235 friend class CountRCClosure;
236 friend class EvacPopObjClosure;
237 friend class G1ParCleanupCTTask;
238
239 // Other related classes.
240 friend class G1MarkSweep;
241
242 private:
329 // Typically, it is not full so we should re-use it during the next GC.
330 HeapRegion* _retained_old_gc_alloc_region;
331
332 // It specifies whether we should attempt to expand the heap after a
333 // region allocation failure. If heap expansion fails we set this to
334 // false so that we don't re-attempt the heap expansion (it's likely
335 // that subsequent expansion attempts will also fail if one fails).
336 // Currently, it is only consulted during GC and it's reset at the
337 // start of each GC.
338 bool _expand_heap_after_alloc_failure;
339
340 // It resets the mutator alloc region before new allocations can take place.
341 void init_mutator_alloc_region();
342
343 // It releases the mutator alloc region.
344 void release_mutator_alloc_region();
345
346 // It initializes the GC alloc regions at the start of a GC.
347 void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
348
349 // It releases the GC alloc regions at the end of a GC.
350 void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
351
352 // It does any cleanup that needs to be done on the GC alloc regions
353 // before a Full GC.
354 void abandon_gc_alloc_regions();
355
356 // Helper for monitoring and management support.
357 G1MonitoringSupport* _g1mm;
358
359 // Determines PLAB size for a particular allocation purpose.
360 size_t desired_plab_sz(GCAllocPurpose purpose);
361
362 // Outside of GC pauses, the number of bytes used in all regions other
363 // than the current allocation region.
364 size_t _summary_bytes_used;
365
366 // This array is used for a quick test on whether a reference points into
367 // the collection set or not. Each of the array's elements denotes whether the
368 // corresponding region is in the collection set or not.
810 // update the remembered sets of the regions in the collection
811 // set in the event of an evacuation failure.
812 DirtyCardQueueSet _into_cset_dirty_card_queue_set;
813
814 // After a collection pause, make the regions in the CS into free
815 // regions.
816 void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info);
817
818 // Abandon the current collection set without recording policy
819 // statistics or updating free lists.
820 void abandon_collection_set(HeapRegion* cs_head);
821
822 // Applies "scan_non_heap_roots" to roots outside the heap,
823 // "scan_rs" to roots inside the heap (having done "set_region" to
824 // indicate the region in which the root resides),
825 // and does "scan_metadata". If "scan_rs" is
826 // NULL, then this step is skipped. The "worker_i"
827 // param is for use with parallel roots processing, and should be
828 // the "i" of the calling parallel worker thread's work(i) function.
829 // In the sequential case this param will be ignored.
830 void g1_process_strong_roots(bool is_scavenging,
831 ScanningOption so,
832 OopClosure* scan_non_heap_roots,
833 OopsInHeapRegionClosure* scan_rs,
834 G1KlassScanClosure* scan_klasses,
835 uint worker_i);
836
837 // Notifies all the necessary spaces that the committed space has
838 // been updated (either expanded or shrunk). It should be called
839 // after _g1_storage is updated.
840 void update_committed_space(HeapWord* old_end, HeapWord* new_end);
841
842 // The concurrent marker (and the thread it runs in.)
843 ConcurrentMark* _cm;
844 ConcurrentMarkThread* _cmThread;
845 bool _mark_in_progress;
846
847 // The concurrent refiner.
848 ConcurrentG1Refine* _cg1r;
849
850 // The parallel task queues
851 RefToScanQueueSet *_task_queues;
852
853 // True iff an evacuation has failed in the current collection.
854 bool _evacuation_failed;
1008 ReferenceProcessor* _ref_processor_cm;
1009
1010 // Instance of the concurrent mark is_alive closure for embedding
1011 // into the Concurrent Marking reference processor as the
1012 // _is_alive_non_header field. Supplying a value for the
1013 // _is_alive_non_header field is optional but doing so prevents
1014 // unnecessary additions to the discovered lists during reference
1015 // discovery.
1016 G1CMIsAliveClosure _is_alive_closure_cm;
1017
1018 // Cache used by G1CollectedHeap::start_cset_region_for_worker().
1019 HeapRegion** _worker_cset_start_region;
1020
1021 // Time stamp to validate the regions recorded in the cache
1022 // used by G1CollectedHeap::start_cset_region_for_worker().
1023 // The heap region entry for a given worker is valid iff
1024 // the associated time stamp value matches the current value
1025 // of G1CollectedHeap::_gc_time_stamp.
1026 unsigned int* _worker_cset_start_region_time_stamp;
1027
1028 enum G1H_process_strong_roots_tasks {
1029 G1H_PS_filter_satb_buffers,
1030 G1H_PS_refProcessor_oops_do,
1031 // Leave this one last.
1032 G1H_PS_NumElements
1033 };
1034
1035 SubTasksDone* _process_strong_tasks;
1036
1037 volatile bool _free_regions_coming;
1038
1039 public:
1040
1041 SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
1042
1043 void set_refine_cte_cl_concurrency(bool concurrent);
1044
1045 RefToScanQueue *task_queue(int i) const;
1046
1047 // A set of cards where updates happened during the GC
1048 DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
1590 void push_dirty_cards_region(HeapRegion* hr);
1591 HeapRegion* pop_dirty_cards_region();
1592
1593 // Optimized nmethod scanning support routines
1594
1595 // Register the given nmethod with the G1 heap.
1596 virtual void register_nmethod(nmethod* nm);
1597
1598 // Unregister the given nmethod from the G1 heap.
1599 virtual void unregister_nmethod(nmethod* nm);
1600
1601 // Migrate the nmethods in the code root lists of the regions
1602 // in the collection set to regions in to-space. In the event
1603 // of an evacuation failure, nmethods that reference objects
1604 // that were not successfully evacuated are not migrated.
1605 void migrate_strong_code_roots();
1606
1607 // Free up superfluous code root memory.
1608 void purge_code_root_memory();
1609
1610 // During an initial mark pause, mark all the code roots that
1611 // point into regions *not* in the collection set.
1612 void mark_strong_code_roots(uint worker_id);
1613
1614 // Rebuild the strong code root lists for each region
1615 // after a full GC.
1616 void rebuild_strong_code_roots();
1617
1618 // Delete entries for dead interned strings and clean up unreferenced symbols
1619 // in symbol table, possibly in parallel.
1620 void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
1621
1622 // Redirty logged cards in the refinement queue.
1623 void redirty_logged_cards();
1624 // Verification
1625
1626 // The following is just to alert the verification code
1627 // that a full collection has occurred and that the
1628 // remembered sets are no longer up to date.
1629 bool _full_collection;
1630 void set_full_collection() { _full_collection = true;}
1631 void clear_full_collection() {_full_collection = false;}
1632 bool full_collection() {return _full_collection;}
1633
1634 // Perform any cleanup actions necessary before allowing a verification.
1635 virtual void prepare_for_verify();
1636
1637 // Perform verification.
1638
1639 // vo == UsePrevMarking -> use "prev" marking information,
1640 // vo == UseNextMarking -> use "next" marking information
|
// Is-alive closure for stop-the-world (STW) phases (per its name).
// do_object_b() is declared here and defined out of line; it reports
// whether the given oop is considered live, consulting the heap via _g1.
193 class G1STWIsAliveClosure: public BoolObjectClosure {
194 G1CollectedHeap* _g1;  // owning heap, queried by do_object_b()
195 public:
196 G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
197 bool do_object_b(oop p);
198 };
199
200 // Instances of this class are used for quick tests on whether a reference points
201 // into the collection set. Each of the array's elements denotes whether the
202 // corresponding region is in the collection set.
203 class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<bool> {
204 protected:
205 bool default_value() const { return false; }  // regions are "not in the collection set" by default
206 public:
207 void clear() { G1BiasedMappedArray<bool>::clear(); }  // delegates to the base class clear()
208 };
209
210 class RefineCardTableEntryClosure;
211
212 class G1CollectedHeap : public SharedHeap {
213 friend class VM_CollectForMetadataAllocation;
214 friend class VM_G1CollectForAllocation;
215 friend class VM_G1CollectFull;
216 friend class VM_G1IncCollectionPause;
217 friend class VMStructs;
218 friend class MutatorAllocRegion;
219 friend class SurvivorGCAllocRegion;
220 friend class OldGCAllocRegion;
221
222 // Closures used in implementation.
223 template <G1Barrier barrier, G1Mark do_mark_object>
224 friend class G1ParCopyClosure;
225 friend class G1IsAliveClosure;
226 friend class G1EvacuateFollowersClosure;
227 friend class G1ParScanThreadState;
228 friend class G1ParScanClosureSuper;
229 friend class G1ParEvacuateFollowersClosure;
230 friend class G1ParTask;
231 friend class G1FreeGarbageRegionClosure;
232 friend class RefineCardTableEntryClosure;
233 friend class G1PrepareCompactClosure;
234 friend class RegionSorter;
235 friend class RegionResetter;
236 friend class CountRCClosure;
237 friend class EvacPopObjClosure;
238 friend class G1ParCleanupCTTask;
239
240 // Other related classes.
241 friend class G1MarkSweep;
242
243 private:
330 // Typically, it is not full so we should re-use it during the next GC.
331 HeapRegion* _retained_old_gc_alloc_region;
332
333 // It specifies whether we should attempt to expand the heap after a
334 // region allocation failure. If heap expansion fails we set this to
335 // false so that we don't re-attempt the heap expansion (it's likely
336 // that subsequent expansion attempts will also fail if one fails).
337 // Currently, it is only consulted during GC and it's reset at the
338 // start of each GC.
339 bool _expand_heap_after_alloc_failure;
340
341 // It resets the mutator alloc region before new allocations can take place.
342 void init_mutator_alloc_region();
343
344 // It releases the mutator alloc region.
345 void release_mutator_alloc_region();
346
347 // It initializes the GC alloc regions at the start of a GC.
348 void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
349
350 // Set up the retained old gc alloc region as the current old gc alloc region.
351 void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);
352
353 // It releases the GC alloc regions at the end of a GC.
354 void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
355
356 // It does any cleanup that needs to be done on the GC alloc regions
357 // before a Full GC.
358 void abandon_gc_alloc_regions();
359
360 // Helper for monitoring and management support.
361 G1MonitoringSupport* _g1mm;
362
363 // Determines PLAB size for a particular allocation purpose.
364 size_t desired_plab_sz(GCAllocPurpose purpose);
365
366 // Outside of GC pauses, the number of bytes used in all regions other
367 // than the current allocation region.
368 size_t _summary_bytes_used;
369
370 // This array is used for a quick test on whether a reference points into
371 // the collection set or not. Each of the array's elements denotes whether the
372 // corresponding region is in the collection set or not.
814 // update the remembered sets of the regions in the collection
815 // set in the event of an evacuation failure.
816 DirtyCardQueueSet _into_cset_dirty_card_queue_set;
817
818 // After a collection pause, make the regions in the CS into free
819 // regions.
820 void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info);
821
822 // Abandon the current collection set without recording policy
823 // statistics or updating free lists.
824 void abandon_collection_set(HeapRegion* cs_head);
825
826 // Applies "scan_non_heap_roots" to roots outside the heap,
827 // "scan_rs" to roots inside the heap (having done "set_region" to
828 // indicate the region in which the root resides),
829 // and does "scan_metadata". If "scan_rs" is
830 // NULL, then this step is skipped. The "worker_i"
831 // param is for use with parallel roots processing, and should be
832 // the "i" of the calling parallel worker thread's work(i) function.
833 // In the sequential case this param will be ignored.
834 void g1_process_roots(OopClosure* scan_non_heap_roots,
835 OopClosure* scan_non_heap_weak_roots,
836 OopsInHeapRegionClosure* scan_rs,
837 CLDClosure* scan_strong_clds,
838 CLDClosure* scan_weak_clds,
839 CodeBlobClosure* scan_strong_code,
840 uint worker_i);
841
842 // Notifies all the necessary spaces that the committed space has
843 // been updated (either expanded or shrunk). It should be called
844 // after _g1_storage is updated.
845 void update_committed_space(HeapWord* old_end, HeapWord* new_end);
846
847 // The concurrent marker (and the thread it runs in.)
848 ConcurrentMark* _cm;
849 ConcurrentMarkThread* _cmThread;
850 bool _mark_in_progress;
851
852 // The concurrent refiner.
853 ConcurrentG1Refine* _cg1r;
854
855 // The parallel task queues
856 RefToScanQueueSet *_task_queues;
857
858 // True iff an evacuation has failed in the current collection.
859 bool _evacuation_failed;
1013 ReferenceProcessor* _ref_processor_cm;
1014
1015 // Instance of the concurrent mark is_alive closure for embedding
1016 // into the Concurrent Marking reference processor as the
1017 // _is_alive_non_header field. Supplying a value for the
1018 // _is_alive_non_header field is optional but doing so prevents
1019 // unnecessary additions to the discovered lists during reference
1020 // discovery.
1021 G1CMIsAliveClosure _is_alive_closure_cm;
1022
1023 // Cache used by G1CollectedHeap::start_cset_region_for_worker().
1024 HeapRegion** _worker_cset_start_region;
1025
1026 // Time stamp to validate the regions recorded in the cache
1027 // used by G1CollectedHeap::start_cset_region_for_worker().
1028 // The heap region entry for a given worker is valid iff
1029 // the associated time stamp value matches the current value
1030 // of G1CollectedHeap::_gc_time_stamp.
1031 unsigned int* _worker_cset_start_region_time_stamp;
1032
1033 enum G1H_process_roots_tasks {
1034 G1H_PS_filter_satb_buffers,
1035 G1H_PS_refProcessor_oops_do,
1036 // Leave this one last.
1037 G1H_PS_NumElements
1038 };
1039
1040 SubTasksDone* _process_strong_tasks;
1041
1042 volatile bool _free_regions_coming;
1043
1044 public:
1045
1046 SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
1047
1048 void set_refine_cte_cl_concurrency(bool concurrent);
1049
1050 RefToScanQueue *task_queue(int i) const;
1051
1052 // A set of cards where updates happened during the GC
1053 DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
1595 void push_dirty_cards_region(HeapRegion* hr);
1596 HeapRegion* pop_dirty_cards_region();
1597
1598 // Optimized nmethod scanning support routines
1599
1600 // Register the given nmethod with the G1 heap.
1601 virtual void register_nmethod(nmethod* nm);
1602
1603 // Unregister the given nmethod from the G1 heap.
1604 virtual void unregister_nmethod(nmethod* nm);
1605
1606 // Migrate the nmethods in the code root lists of the regions
1607 // in the collection set to regions in to-space. In the event
1608 // of an evacuation failure, nmethods that reference objects
1609 // that were not successfully evacuated are not migrated.
1610 void migrate_strong_code_roots();
1611
1612 // Free up superfluous code root memory.
1613 void purge_code_root_memory();
1614
1615 // Rebuild the strong code root lists for each region
1616 // after a full GC.
1617 void rebuild_strong_code_roots();
1618
1619 // Delete entries for dead interned strings and clean up unreferenced symbols
1620 // in symbol table, possibly in parallel.
1621 void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
1622
1623 // Parallel phase of unloading/cleaning after G1 concurrent mark.
1624 void parallel_cleaning(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool class_unloading_occurred);
1625
1626 // Redirty logged cards in the refinement queue.
1627 void redirty_logged_cards();
1628 // Verification
1629
1630 // The following is just to alert the verification code
1631 // that a full collection has occurred and that the
1632 // remembered sets are no longer up to date.
1633 bool _full_collection;
1634 void set_full_collection() { _full_collection = true;}
1635 void clear_full_collection() {_full_collection = false;}
1636 bool full_collection() {return _full_collection;}
1637
1638 // Perform any cleanup actions necessary before allowing a verification.
1639 virtual void prepare_for_verify();
1640
1641 // Perform verification.
1642
1643 // vo == UsePrevMarking -> use "prev" marking information,
1644 // vo == UseNextMarking -> use "next" marking information
|