// Forward declarations of collaborator types referenced by pointer/reference
// in this header; full definitions are pulled in only by the .cpp files.
68 class Space;
69 class G1CollectionSet;
70 class G1CollectorPolicy;
71 class G1RemSet;
72 class HeapRegionRemSetIterator;
73 class G1ConcurrentMark;
74 class ConcurrentMarkThread;
75 class ConcurrentG1Refine;
76 class ConcurrentGCTimer;
77 class GenerationCounters;
78 class STWGCTimer;
79 class G1NewTracer;
80 class G1OldTracer;
81 class EvacuationFailedInfo;
82 class nmethod;
83 class Ticks;
84 class WorkGang;
85 class G1Allocator;
86 class G1ArchiveAllocator;
87 class G1HeapVerifier;
88
// Work-queue types holding StarTask entries, tagged with the GC memory type.
89 typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
90 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
91
// Index types; plain int is wide enough for the documented ranges.
92 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
93 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
94
95 // The G1 STW is alive closure.
96 // An instance is embedded into the G1CH and used as the
97 // (optional) _is_alive_non_header closure in the STW
98 // reference processor. It is also extensively used during
99 // reference processing during STW evacuation pauses.
100 class G1STWIsAliveClosure: public BoolObjectClosure {
// The heap this closure queries; set at construction, non-owning.
101 G1CollectedHeap* _g1;
102 public:
103 G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
// Liveness predicate for p; implementation is out of line (in the .cpp).
104 bool do_object_b(oop p);
105 };
106
107 class RefineCardTableEntryClosure;
// Asserts that the VM is at a safepoint and that the current thread's
// VM-thread-ness matches the flag. Wrapped in do/while(0) so the macro
// behaves as a single statement. NOTE: no comments inside the macro body —
// '//' on a backslash-continued line would swallow the continuation.
347 #define assert_at_safepoint(_should_be_vm_thread_) \
348 do { \
349 assert(SafepointSynchronize::is_at_safepoint() && \
350 ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
351 heap_locking_asserts_params("should be at a safepoint")); \
352 } while (0)
353
// Converse check: asserts the VM is NOT at a safepoint.
354 #define assert_not_at_safepoint() \
355 do { \
356 assert(!SafepointSynchronize::is_at_safepoint(), \
357 heap_locking_asserts_params("should not be at a safepoint")); \
358 } while (0)
359
360 protected:
361
362 // The young region list.
363 YoungList* _young_list;
364
365 // The current policy object for the collector.
366 G1CollectorPolicy* _g1_policy;
367
// The collection set (regions chosen for evacuation); held by value.
368 G1CollectionSet _collection_set;
369
370 // This is the second level of trying to allocate a new region. If
371 // new_region() didn't find a region on the free_list, this call will
372 // check whether there's anything available on the
373 // secondary_free_list and/or wait for more regions to appear on
374 // that list, if _free_regions_coming is set.
375 HeapRegion* new_region_try_secondary_free_list(bool is_old);
376
377 // Try to allocate a single non-humongous HeapRegion sufficient for
378 // an allocation of the given word_size. If do_expand is true,
379 // attempt to expand the heap if necessary to satisfy the allocation
380 // request. If the region is to be used as an old region or for a
381 // humongous object, set is_old to true. If not, to false.
// Returns NULL on failure — TODO confirm against the .cpp definition.
382 HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
383
384 // Initialize a contiguous set of free regions of length num_regions
385 // and starting at index first so that they appear as a single
386 // humongous region.
|
// Forward declarations of collaborator types referenced by pointer/reference
// in this header; full definitions are pulled in only by the .cpp files.
68 class Space;
69 class G1CollectionSet;
70 class G1CollectorPolicy;
71 class G1RemSet;
72 class HeapRegionRemSetIterator;
73 class G1ConcurrentMark;
74 class ConcurrentMarkThread;
75 class ConcurrentG1Refine;
76 class ConcurrentGCTimer;
77 class GenerationCounters;
78 class STWGCTimer;
79 class G1NewTracer;
80 class G1OldTracer;
81 class EvacuationFailedInfo;
82 class nmethod;
83 class Ticks;
84 class WorkGang;
85 class G1Allocator;
86 class G1ArchiveAllocator;
87 class G1HeapVerifier;
88 class G1HeapSizingPolicy;
89
// Work-queue types holding StarTask entries, tagged with the GC memory type.
90 typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
91 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
92
// Index types; plain int is wide enough for the documented ranges.
93 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
94 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
95
96 // The G1 STW is alive closure.
97 // An instance is embedded into the G1CH and used as the
98 // (optional) _is_alive_non_header closure in the STW
99 // reference processor. It is also extensively used during
100 // reference processing during STW evacuation pauses.
101 class G1STWIsAliveClosure: public BoolObjectClosure {
// The heap this closure queries; set at construction, non-owning.
102 G1CollectedHeap* _g1;
103 public:
104 G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
// Liveness predicate for p; implementation is out of line (in the .cpp).
105 bool do_object_b(oop p);
106 };
107
108 class RefineCardTableEntryClosure;
// Asserts that the VM is at a safepoint and that the current thread's
// VM-thread-ness matches the flag. Wrapped in do/while(0) so the macro
// behaves as a single statement. NOTE: no comments inside the macro body —
// '//' on a backslash-continued line would swallow the continuation.
348 #define assert_at_safepoint(_should_be_vm_thread_) \
349 do { \
350 assert(SafepointSynchronize::is_at_safepoint() && \
351 ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
352 heap_locking_asserts_params("should be at a safepoint")); \
353 } while (0)
354
// Converse check: asserts the VM is NOT at a safepoint.
355 #define assert_not_at_safepoint() \
356 do { \
357 assert(!SafepointSynchronize::is_at_safepoint(), \
358 heap_locking_asserts_params("should not be at a safepoint")); \
359 } while (0)
360
361 protected:
362
363 // The young region list.
364 YoungList* _young_list;
365
366 // The current policy object for the collector.
367 G1CollectorPolicy* _g1_policy;
// Heap sizing policy (expansion/shrink decisions) — see G1HeapSizingPolicy.
368 G1HeapSizingPolicy* _heap_sizing_policy;
369
// The collection set (regions chosen for evacuation); held by value.
370 G1CollectionSet _collection_set;
371
372 // This is the second level of trying to allocate a new region. If
373 // new_region() didn't find a region on the free_list, this call will
374 // check whether there's anything available on the
375 // secondary_free_list and/or wait for more regions to appear on
376 // that list, if _free_regions_coming is set.
377 HeapRegion* new_region_try_secondary_free_list(bool is_old);
378
379 // Try to allocate a single non-humongous HeapRegion sufficient for
380 // an allocation of the given word_size. If do_expand is true,
381 // attempt to expand the heap if necessary to satisfy the allocation
382 // request. If the region is to be used as an old region or for a
383 // humongous object, set is_old to true. If not, to false.
// Returns NULL on failure — TODO confirm against the .cpp definition.
384 HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
385
386 // Initialize a contiguous set of free regions of length num_regions
387 // and starting at index first so that they appear as a single
388 // humongous region.
|