         heap_locking_asserts_params("should not be holding the Heap_lock and " \
                                     "should not be at a safepoint"));        \
  } while (0)

#define assert_at_safepoint_on_vm_thread()                                    \
  do {                                                                        \
    assert_at_safepoint();                                                    \
    assert(Thread::current_or_null() != NULL, "no current thread");           \
    assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
  } while (0)
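
// Illustrative use of the macro above (a sketch, not code from this file;
// some_vm_thread_work is a hypothetical method): any operation that must run
// inside a safepoint, on the VM thread, opens with the assertion so that a
// misplaced call fails fast in debug builds.
//
//   void G1CollectedHeap::some_vm_thread_work() {
//     assert_at_safepoint_on_vm_thread();
//     // ... work that relies on safepoint/VM-thread guarantees ...
//   }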

  // The young generation regions (eden and survivor).
  G1EdenRegions _eden;
  G1SurvivorRegions _survivor;

  STWGCTimer* _gc_timer_stw;

  G1NewTracer* _gc_tracer_stw;

  // The current policy object for the collector.
  G1Policy* _policy;
  G1HeapSizingPolicy* _heap_sizing_policy;

  G1CollectionSet _collection_set;

  // Try to allocate a single non-humongous HeapRegion sufficient for an
  // allocation of the given word_size. If do_expand is true, attempt to
  // expand the heap if necessary to satisfy the allocation request. 'type'
  // specifies the type of region to allocate. (Use the constants Old, Eden,
  // Humongous and Survivor defined in HeapRegionType.)
  HeapRegion* new_region(size_t word_size, HeapRegionType type, bool do_expand);
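
  // Illustrative call (a sketch; error handling is elided and the enclosing
  // allocation path is hypothetical):
  //
  //   HeapRegion* r = new_region(word_size, HeapRegionType::Eden, true /* do_expand */);
  //   if (r == NULL) {
  //     // No free region was available and expansion did not help.
  //   }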

  // Initialize a contiguous set of free regions of length num_regions
  // and starting at index first so that they appear as a single
  // humongous region.
  HeapWord* humongous_obj_allocate_initialize_regions(uint first,
                                                      uint num_regions,
                                                      size_t word_size);
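
  // For example (an illustrative sketch): a humongous object spanning three
  // regions starting at region index 10 would be laid out with
  //
  //   HeapWord* addr = humongous_obj_allocate_initialize_regions(10, 3, word_size);
  //
  // after which regions 10..12 together appear as a single humongous region
  // whose object starts at addr.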

  // Attempt to allocate a humongous object of the given size. Return
  // NULL if unsuccessful.

  // ...

  // The guts of the incremental collection pause, executed by the VM
  // thread. It returns false if it is unable to do the collection due
  // to the GC locker being active, true otherwise.
  bool do_collection_pause_at_safepoint(double target_pause_time_ms);
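
  // Caller-side sketch (hypothetical; the retry policy shown is illustrative):
  // because the pause bails out when the GC locker is active, callers check
  // the result and arrange a retry rather than assuming a collection happened.
  //
  //   if (!do_collection_pause_at_safepoint(target_pause_time_ms)) {
  //     // GC locker was active: no pause was done; retry once it is released.
  //   }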

  // Actually do the work of evacuating the collection set.
  void evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states);
  void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
  void evacuate_optional_regions(G1ParScanThreadStateSet* per_thread_states, G1OptionalCSet* ocset);

  void pre_evacuate_collection_set();
  void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
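
  // The sequencing suggested by the names above, during a single pause
  // (a sketch, not the literal pause code):
  //
  //   pre_evacuate_collection_set();
  //   evacuate_collection_set(per_thread_states);
  //   evacuate_optional_collection_set(per_thread_states);  // if time remains
  //   post_evacuate_collection_set(evacuation_info, per_thread_states);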

  // Update object copying statistics.
  void record_obj_copy_mem_stats();

  // The hot card cache for remembered set insertion optimization.
  G1HotCardCache* _hot_card_cache;

  // The G1 remembered set of the heap.
  G1RemSet* _rem_set;

  // A set of cards that cover the objects for which the remembered sets
  // should be updated concurrently after the collection.
  G1DirtyCardQueueSet _dirty_card_queue_set;

  // After a collection pause, convert the regions in the collection set into
  // free regions.
  void free_collection_set(G1CollectionSet* collection_set,
                           G1EvacuationInfo& evacuation_info,
                           const size_t* surviving_young_words);

  // Abandon the current collection set without recording policy
  // statistics or updating free lists.
  void abandon_collection_set(G1CollectionSet* collection_set);

  // The concurrent marker (and the thread it runs in).
  G1ConcurrentMark* _cm;
  G1ConcurrentMarkThread* _cm_thread;

  // The concurrent refiner.
  G1ConcurrentRefine* _cr;

  // ...

  static size_t conservative_max_heap_alignment();

  // Performs operations required after initialization has been completed.
  void post_initialize();

  // Initialize weak reference processing.
  void ref_processing_init();

  virtual Name kind() const {
    return CollectedHeap::G1;
  }

  virtual const char* name() const {
    return "G1";
  }
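
  // Illustrative downcast guarded by kind() (a sketch; Universe::heap() is
  // the generic accessor for the global heap):
  //
  //   CollectedHeap* heap = Universe::heap();
  //   assert(heap->kind() == CollectedHeap::G1, "invalid heap kind");
  //   G1CollectedHeap* g1h = (G1CollectedHeap*)heap;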

  const G1CollectorState* collector_state() const { return &_collector_state; }
  G1CollectorState* collector_state() { return &_collector_state; }

  // The current policy object for the collector.
  G1Policy* policy() const { return _policy; }
  // The remembered set.
  G1RemSet* rem_set() const { return _rem_set; }

  HeapRegionManager* hrm() const { return _hrm; }

  const G1CollectionSet* collection_set() const { return &_collection_set; }
  G1CollectionSet* collection_set() { return &_collection_set; }

  virtual CollectorPolicy* collector_policy() const;

  virtual SoftRefPolicy* soft_ref_policy();

  virtual void initialize_serviceability();
  virtual MemoryUsage memory_usage();
  virtual GrowableArray<GCMemoryManager*> memory_managers();
  virtual GrowableArray<MemoryPool*> memory_pools();

  // Try to minimize the remembered set.
  void scrub_rem_set();

  // Apply the given closure on all cards in the Hot Card Cache, emptying it.
  void iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i);

  // Apply the given closure on all cards in the Dirty Card Queue Set, emptying it.
  void iterate_dirty_card_closure(G1CardTableEntryClosure* cl, uint worker_i);
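
  // Illustrative closure sketch (hedged: assumes G1CardTableEntryClosure
  // provides a do_card_ptr(card_ptr, worker_i) callback; CountCardsClosure
  // is hypothetical):
  //
  //   class CountCardsClosure : public G1CardTableEntryClosure {
  //     size_t _count;
  //    public:
  //     CountCardsClosure() : _count(0) { }
  //     bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
  //       _count++;
  //       return true;  // true means "continue iterating"
  //     }
  //     size_t count() const { return _count; }
  //   };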

  // The shared block offset table array.
  G1BlockOffsetTable* bot() const { return _bot; }

  // Reference Processing accessors

  // The STW reference processor.
  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }

  G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }

  // The Concurrent Marking reference processor.
  // ...

  // Perform verification.

  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information
  // vo == UseFullMarking -> use "next" marking bitmap but no TAMS
  //
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // vo == UsePrevMarking.
  // Currently, there is only one case where this is called with
  // vo == UseNextMarking, which is to verify the "next" marking
  // information at the end of remark.
  // Currently there is only one place where this is called with
  // vo == UseFullMarking, which is to verify the marking during a
  // full GC.
  void verify(VerifyOption vo);
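
  // Illustrative call (the enumerator spelling, e.g.
  // VerifyOption_G1UsePrevMarking, is assumed from the scheme above):
  //
  //   verify(VerifyOption_G1UsePrevMarking);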

  // WhiteBox testing support.
  virtual bool supports_concurrent_phase_control() const;
  virtual bool request_concurrent_phase(const char* phase);
  bool is_heap_heterogeneous() const;

  virtual WorkGang* get_safepoint_workers() { return _workers; }

  // The methods below are here for convenience and dispatch the
  // appropriate method depending on the value of the given VerifyOption
  // parameter. The values for that parameter, and their meanings,
  // are the same as those above.

  bool is_obj_dead_cond(const oop obj,
                        const HeapRegion* hr,
                        const VerifyOption vo) const;

  bool is_obj_dead_cond(const oop obj,
                        const VerifyOption vo) const;
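
  // A plausible shape for the dispatch (a sketch; is_obj_dead, is_obj_ill and
  // is_obj_dead_full are assumed to be the per-marking-info predicates):
  //
  //   switch (vo) {
  //     case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
  //     case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
  //     case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj, hr);
  //     default:                            ShouldNotReachHere();
  //   }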

  G1HeapSummary create_g1_heap_summary();
  G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);

  // Printing
private: