 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP

#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/evacuationInfo.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp"
#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegionManager.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/barrierSet.hpp"
#include "memory/memRegion.hpp"
#include "memory/sharedHeap.hpp"
#include "utilities/stack.hpp"

// A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
// It uses the "Garbage First" heap organization and algorithm, which
// may combine concurrent marking with parallel, incremental compaction of
// heap subsets that will yield large amounts of garbage.
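//
// A back-of-the-envelope illustration (not part of this header's contract):
// with a HeapRegion size of 1 MB, a 4 GB heap is managed as 4096 regions,
// and any single allocation larger than the humongous threshold (half a
// region in this implementation; see is_humongous() below) is treated as
// humongous and bypasses the regular region-local allocation paths.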

// Forward declarations
class HeapRegion;
class HRRSCleanupTask;
class GenerationSpec;
class OopsInHeapRegionClosure;
class G1KlassScanClosure;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
class Space;
class G1CollectorPolicy;
class GenRemSet;
class G1RemSet;
class HeapRegionRemSetIterator;
class ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
class ConcurrentGCTimer;
class GenerationCounters;
class STWGCTimer;
class G1NewTracer;
class G1OldTracer;
class EvacuationFailedInfo;
class nmethod;
class Ticks;

public:
  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  bool do_object_b(oop p);
};

class RefineCardTableEntryClosure;

class G1RegionMappingChangedListener : public G1MappingChangedListener {
 private:
  void reset_from_card_cache(uint start_idx, size_t num_regions);
 public:
  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};

class G1CollectedHeap : public SharedHeap {
  friend class VM_CollectForMetadataAllocation;
  friend class VM_G1CollectForAllocation;
  friend class VM_G1CollectFull;
  friend class VM_G1IncCollectionPause;
  friend class VMStructs;
  friend class MutatorAllocRegion;
  friend class SurvivorGCAllocRegion;
  friend class OldGCAllocRegion;
  friend class G1Allocator;

  // Closures used in implementation.
  friend class G1ParScanThreadState;
  friend class G1ParTask;
  friend class G1ParGCAllocator;
  friend class G1PrepareCompactClosure;

  // Other related classes.
  friend class HeapRegionClaimer;

  // Testing classes.
  friend class G1CheckCSetFastTableClosure;

private:
  // The one and only G1CollectedHeap, so static functions can find it.
  static G1CollectedHeap* _g1h;

  static size_t _humongous_object_threshold_in_words;

  // The secondary free list which contains regions that have been
  // freed up during the cleanup process. This will be appended to
  // the master free list when appropriate.
  FreeRegionList _secondary_free_list;

  // It keeps track of the old regions.
  // only exception is the humongous set which we leave unaltered. If
  // free_list_only is true, it will only tear down the master free
  // list. It is called before a Full GC (free_list_only == false) or
  // before heap shrinking (free_list_only == true).
  void tear_down_region_sets(bool free_list_only);

  // Rebuilds the region sets / lists so that they are repopulated to
  // reflect the contents of the heap. The only exception is the
  // humongous set which was not torn down in the first place. If
  // free_list_only is true, it will only rebuild the master free
  // list. It is called after a Full GC (free_list_only == false) or
  // after heap shrinking (free_list_only == true).
  void rebuild_region_sets(bool free_list_only);
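
  // Illustrative pairing (a sketch, not a prescribed call sequence): per the
  // comments above, the two routines bracket a Full GC, e.g.
  //   tear_down_region_sets(false /* free_list_only */);
  //   ... compact the heap ...
  //   rebuild_region_sets(false /* free_list_only */);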

  // Callback for region mapping changed events.
  G1RegionMappingChangedListener _listener;

  // The sequence of all heap regions in the heap.
  HeapRegionManager _hrm;

  // Class that handles the different kinds of allocations.
  G1Allocator* _allocator;

  // Statistics for each allocation context
  AllocationContextStats _allocation_context_stats;

  // PLAB sizing policy for survivors.
  PLABStats _survivor_plab_stats;

  // PLAB sizing policy for tenured objects.
  PLABStats _old_plab_stats;

  // It specifies whether we should attempt to expand the heap after a
  // region allocation failure. If heap expansion fails we set this to
  // false so that we don't re-attempt the heap expansion (it's likely
  // that subsequent expansion attempts will also fail if one fails).
  // Currently, it is only consulted during GC and it's reset at the
  // start of each GC.
  bool _expand_heap_after_alloc_failure;

  // It resets the mutator alloc region before new allocations can take place.
  void init_mutator_alloc_region();

  // It releases the mutator alloc region.
  void release_mutator_alloc_region();

  // It initializes the GC alloc regions at the start of a GC.
  void init_gc_alloc_regions(EvacuationInfo& evacuation_info);

  // It releases the GC alloc regions at the end of a GC.
  void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);

  // It does any cleanup that needs to be done on the GC alloc regions
  // before a Full GC.
  void abandon_gc_alloc_regions();

  // Helper for monitoring and management support.
  G1MonitoringSupport* _g1mm;

  // Records whether the region at the given index is kept live by roots or
  // references from the young generation.
  class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
   protected:
    bool default_value() const { return false; }
   public:
    void clear() { G1BiasedMappedArray<bool>::clear(); }
    void set_live(uint region) {
      set_by_index(region, true);
    }
    bool is_live(uint region) {
      return get_by_index(region);
    }
  };

  HumongousIsLiveBiasedMappedArray _humongous_is_live;
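
  // Usage sketch (illustrative; the real call sites are in the .cpp file):
  // during root scanning, a humongous region found to be referenced is
  // marked live, and regions never marked become eager-reclaim candidates:
  //   _humongous_is_live.set_live(region_index);
  //   ...
  //   if (!_humongous_is_live.is_live(region_index)) {
  //     // candidate for reclamation without a full marking cycle
  //   }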
  // Stores whether during humongous object registration we found candidate regions.
  // pause. This should only be used for non-humongous allocations.
  HeapWord* attempt_allocation_slow(size_t word_size,
                                    AllocationContext_t context,
                                    uint* gc_count_before_ret,
                                    uint* gclocker_retry_count_ret);

  // Takes the Heap_lock and attempts a humongous allocation. It can
  // potentially schedule a GC pause.
  HeapWord* attempt_allocation_humongous(size_t word_size,
                                         uint* gc_count_before_ret,
                                         uint* gclocker_retry_count_ret);

  // Allocation attempt that should be called during safepoints (e.g.,
  // at the end of a successful GC). expect_null_mutator_alloc_region
  // specifies whether the mutator alloc region is expected to be NULL
  // or not.
  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                            AllocationContext_t context,
                                            bool expect_null_mutator_alloc_region);

  // It dirties the cards that cover the block so that the post
  // write barrier never queues anything when updating objects on this
  // block. It is assumed (and in fact we assert) that the block
  // belongs to a young region.
  inline void dirty_young_block(HeapWord* start, size_t word_size);
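
  // Typical use (a sketch; the actual call sites live in the implementation
  // file): after a successful allocation in a young region,
  //   HeapWord* result = ...;  // e.g., from a collection-pause allocation
  //   if (result != NULL) {
  //     dirty_young_block(result, word_size);
  //   }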

  // Allocate blocks during garbage collection. Will ensure an
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // may not be humongous - it must fit into a single heap region.
  inline HeapWord* par_allocate_during_gc(InCSetState dest,
                                          size_t word_size,
                                          AllocationContext_t context);
  // Ensure that no further allocations can happen in "r", bearing in mind
  // that parallel threads might be attempting allocations.
  void par_allocate_remaining_space(HeapRegion* r);

  // Allocation attempt during GC for a survivor object / PLAB.
  inline HeapWord* survivor_attempt_allocation(size_t word_size,
                                               AllocationContext_t context);

  // Allocation attempt during GC for an old object / PLAB.
  inline HeapWord* old_attempt_allocation(size_t word_size,
                                          AllocationContext_t context);

  // These methods are the "callbacks" from the G1AllocRegion class.

  // For mutator alloc regions.
  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
  void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                   size_t allocated_bytes);

  // For GC alloc regions.
  HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
                                  InCSetState dest);
  void retire_gc_alloc_region(HeapRegion* alloc_region,
                              size_t allocated_bytes, InCSetState dest);
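
  // Callback cycle, sketched (G1AllocRegion drives this; the local variable
  // names are illustrative):
  //   HeapRegion* r = new_gc_alloc_region(word_size, count, dest);
  //   ... threads allocate out of r until it cannot satisfy a request ...
  //   retire_gc_alloc_region(r, allocated_bytes, dest);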

  // - if explicit_gc is true, the GC is for a System.gc() or a heap
  //   inspection request and should collect the entire heap
  // - if clear_all_soft_refs is true, all soft references should be
  //   cleared during the GC
  // - if explicit_gc is false, word_size describes the allocation that
  //   the GC should attempt (at least) to satisfy
  // - it returns false if it is unable to do the collection due to the
  //   GC locker being active, true otherwise
  bool do_collection(bool explicit_gc,
                     bool clear_all_soft_refs,
                     size_t word_size);
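
  // One plausible caller, sketched (not the exact code in the .cpp file):
  // an explicit System.gc() that must collect the whole heap would amount to
  //   do_collection(true /* explicit_gc */,
  //                 clear_all_soft_refs,
  //                 0    /* word_size */);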

  // Callback from VM_G1CollectFull operation.
  // Perform a full collection.
  virtual void do_full_collection(bool clear_all_soft_refs);

  // Resize the heap if necessary after a full collection. If this is
  // after a collect-for allocation, "word_size" is the allocation size,
  // and will be considered part of the used portion of the heap.
  void resize_if_necessary_after_full_collection(size_t word_size);

  // Callback from VM_G1CollectForAllocation operation.
  // This function does everything necessary/possible to satisfy a
  // failed allocation request (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t word_size,
                                      AllocationContext_t context,
                                      bool* succeeded);

  // Attempts to expand the heap sufficiently to support an allocation
  // of the given "word_size". If successful, perform the allocation and
  // return the address of the allocated block, or else "NULL".
  HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);

  // Process any reference objects discovered during
  // an incremental evacuation pause.
  void process_discovered_references(uint no_of_gc_workers);

  // Enqueue any remaining discovered references
  // after processing.
  void enqueue_discovered_references(uint no_of_gc_workers);

public:

  G1Allocator* allocator() {
    return _allocator;
  }

  G1MonitoringSupport* g1mm() {
    assert(_g1mm != NULL, "should have been initialized");
    return _g1mm;
  }

  // Expand the garbage-first heap by at least the given size (in bytes!).
  // Returns true if the heap was expanded by the requested amount;
  // false otherwise.
  // (Rounds up to a HeapRegion boundary.)
  bool expand(size_t expand_bytes);
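
  // Example (illustrative): expand(2 * HeapRegion::GrainBytes) asks for two
  // more regions to be committed, while a request that is not
  // region-aligned, say expand(1), is rounded up to one full region.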

  // Returns the PLAB statistics for a given destination.
  inline PLABStats* alloc_buffer_stats(InCSetState dest);

  // Determines PLAB size for a given destination.
  inline size_t desired_plab_sz(InCSetState dest);

  inline AllocationContextStats& allocation_context_stats();

  // Do anything common to GC's.
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

  inline void set_humongous_is_live(oop obj);

  bool humongous_is_live(uint region) {
    return _humongous_is_live.is_live(region);
  }

  // Returns whether the given region (which must be a humongous (start) region)
  // is to be considered conservatively live regardless of any other conditions.
  bool humongous_region_is_always_live(uint index);
  // Returns whether the given region (which must be a humongous (start) region)
  // is considered a candidate for eager reclamation.
  bool humongous_region_is_candidate(uint index);
  // Register the given region to be part of the collection set.
  inline void register_humongous_region_with_in_cset_fast_test(uint index);
  // Full GC). If concurrent is true, the caller is the outer caller
  // in this nesting (i.e., the concurrent cycle). Further nesting is
  // not currently supported. The end of this call also notifies
  // the FullGCCount_lock in case a Java thread is waiting for a full
  // GC to happen (e.g., it called System.gc() with
  // +ExplicitGCInvokesConcurrent).
  void increment_old_marking_cycles_completed(bool concurrent);

  uint old_marking_cycles_completed() {
    return _old_marking_cycles_completed;
  }

  void register_concurrent_cycle_start(const Ticks& start_time);
  void register_concurrent_cycle_end();
  void trace_heap_after_concurrent_cycle();

  G1YCType yc_type();

  G1HRPrinter* hr_printer() { return &_hr_printer; }

  // Frees a non-humongous region by initializing its contents and
  // adding it to the free list that's passed as a parameter (this is
  // usually a local list which will be appended to the master free
  // list later). The used bytes of freed regions are accumulated in
  // pre_used. If par is true, the region's RSet will not be freed
  // up. The assumption is that this will be done later.
  // The locked parameter indicates if the caller has already taken
  // care of proper synchronization. This may allow some optimizations.
  void free_region(HeapRegion* hr,
                   FreeRegionList* free_list,
                   bool par,
                   bool locked = false);
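
  // Typical pattern, sketched: free into a local list, then splice it into
  // the master free list. The splicing helper named below is assumed and
  // not declared in this excerpt:
  //   FreeRegionList local_free_list("Local Free List");
  //   free_region(hr, &local_free_list, true /* par */);
  //   ...
  //   prepend_to_freelist(&local_free_list);  // assumed helper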

  // Frees a humongous region by collapsing it into individual regions
  // and calling free_region() for each of them. The freed regions
  // will be added to the free list that's passed as a parameter (this
  // is usually a local list which will be appended to the master free
  // list later). The used bytes of freed regions are accumulated in
  // pre_used. If par is true, the region's RSet will not be freed
  // up. The assumption is that this will be done later.
  // evacuation pause) or not (another thread beat us to it or the GC
  // locker was active). Given that we should not be holding the
  // Heap_lock when we enter this method, we will pass the
  // gc_count_before (i.e., total_collections()) as a parameter since
  // it has to be read while holding the Heap_lock. Currently, both
  // methods that call do_collection_pause() release the Heap_lock
  // before the call, so it's easy to read gc_count_before just before.
  HeapWord* do_collection_pause(size_t word_size,
                                uint gc_count_before,
                                bool* succeeded,
                                GCCause::Cause gc_cause);

  // The guts of the incremental collection pause, executed by the vm
  // thread. It returns false if it is unable to do the collection due
  // to the GC locker being active, true otherwise
  bool do_collection_pause_at_safepoint(double target_pause_time_ms);

  // Actually do the work of evacuating the collection set.
  void evacuate_collection_set(EvacuationInfo& evacuation_info);

  // The g1 remembered set of the heap.
  G1RemSet* _g1_rem_set;

  // A set of cards that cover the objects for which the RSets should be
  // updated concurrently after the collection.
  DirtyCardQueueSet _dirty_card_queue_set;

  // The closure used to refine a single card.
  RefineCardTableEntryClosure* _refine_cte_cl;

  // A DirtyCardQueueSet that is used to hold cards that contain
  // references into the current collection set. This is used to
  // update the remembered sets of the regions in the collection
  // set in the event of an evacuation failure.
  DirtyCardQueueSet _into_cset_dirty_card_queue_set;

  // After a collection pause, make the regions in the CS into free
  // regions.
  void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info);

  G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }

  // Reference Processing accessors

  // The STW reference processor....
  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }

  // The Concurrent Marking reference processor...
  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }

  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
  G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }

  virtual size_t capacity() const;
  virtual size_t used() const;
  // This should be called when we're not holding the heap lock. The
  // result might be a bit inaccurate.
  size_t used_unlocked() const;
  size_t recalculate_used() const;

  // These virtual functions do the actual allocation.
  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)
  // But G1CollectedHeap doesn't yet support this.

  virtual bool is_maximal_no_gc() const {
    return _hrm.available() == 0;
  }

  // The current number of regions in the heap.
  uint num_regions() const { return _hrm.length(); }

  // The max number of regions in the heap.
  uint max_regions() const { return _hrm.max_length(); }

  // The number of regions that are completely free.
  uint num_free_regions() const { return _hrm.num_free_regions(); }

  // The number of regions that are not completely free.

  virtual bool is_scavengable(const void* addr);

  // We don't need barriers for initializing stores to objects
  // in the young gen: for the SATB pre-barrier, there is no
  // pre-value that needs to be remembered; for the remembered-set
  // update logging post-barrier, we don't maintain remembered set
  // information for young gen objects.
  virtual inline bool can_elide_initializing_store_barrier(oop new_obj);

  // Returns "true" iff the given word_size is "very large".
  static bool is_humongous(size_t word_size) {
    // Note this has to be strictly greater-than as the TLABs
    // are capped at the humongous threshold and we want to
    // ensure that we don't try to allocate a TLAB as
    // humongous and that we don't allocate a humongous
    // object in a TLAB.
    return word_size > _humongous_object_threshold_in_words;
  }
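
  // Worked example (illustrative; assumes the threshold is half a region):
  // with 1 MB regions and 8-byte HeapWords, the threshold is 65536 words,
  // so a request for 65537 words (just over 512 KB) is humongous while one
  // for 65536 words is not, matching the strictly-greater-than test above.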

  // Update mod union table with the set of dirty cards.
  void updateModUnion();

  // Set the mod union bits corresponding to the given memRegion. Note
  // that this is always a safe operation, since it doesn't clear any
  // bits.
  void markModUnionRange(MemRegion mr);

  // Records the fact that a marking phase is no longer in progress.
  void set_marking_complete() {
    _mark_in_progress = false;
  }
  void set_marking_started() {
    _mark_in_progress = true;
  }
  bool mark_in_progress() {
    return _mark_in_progress;
  }

  // Print the maximum heap capacity.