549
// Accessor for the gang of parallel GC worker threads (_workers).
550 WorkGang* workers() const { return _workers; }
551
552 // Runs the given AbstractGangTask with the current active workers, returning the
553 // total time taken.
554 Tickspan run_task(AbstractGangTask* task);
555
// Accessor for the heap's allocation manager (_allocator).
556 G1Allocator* allocator() {
557 return _allocator;
558 }
559
// Accessor for the heap verifier (_verifier).
560 G1HeapVerifier* verifier() {
561 return _verifier;
562 }
563
// Accessor for the monitoring support object; asserts that it has
// been initialized before handing it out.
564 G1MonitoringSupport* g1mm() {
565 assert(_g1mm != NULL, "should have been initialized");
566 return _g1mm;
567 }
568
// Resize the heap if needed (e.g. shrink/expand after a collection).
569 void resize_heap_if_necessary();
570
// Accessor for the NUMA support object (_numa).
571 G1NUMA* numa() const { return _numa; }
572
573 // Expand the garbage-first heap by at least the given size (in bytes!).
574 // Returns true if the heap was expanded by the requested amount;
575 // false otherwise.
576 // (Rounds up to a HeapRegion boundary.)
577 bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
// Expand by a single region for the given node_index (presumably a
// NUMA node index, matching numa() above — confirm against callers).
578 bool expand_single_region(uint node_index);
579
580 // Returns the PLAB statistics for a given destination.
581 inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);
582
583 // Determines PLAB size for a given destination.
584 inline size_t desired_plab_sz(G1HeapRegionAttr dest);
585
586 // Do anything common to GC's.
587 void gc_prologue(bool full);
588 void gc_epilogue(bool full);
589
772
// Verification hooks bracketing a young collection; the verify type
// selects which subset of checks G1HeapVerifier performs.
773 G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
774 void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
775 void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
776
// Select the regions forming the collection set for this pause,
// constrained by the given pause time goal.
777 void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
778
779 // Actually do the work of evacuating the parts of the collection set.
780 void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
781 void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
782 private:
783 // Evacuate the next set of optional regions.
784 void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);
785
786 public:
// Setup/teardown around the evacuation phases above.
787 void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
788 void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
789 G1RedirtyCardsQueueSet* rdcqs,
790 G1ParScanThreadStateSet* pss);
791
// Grow the heap after a young collection if needed.
792 void expand_heap_after_young_collection();
793 // Update object copying statistics.
794 void record_obj_copy_mem_stats();
795
796 // The hot card cache for remembered set insertion optimization.
797 G1HotCardCache* _hot_card_cache;
798
799 // The g1 remembered set of the heap.
800 G1RemSet* _rem_set;
801
802 // After a collection pause, convert the regions in the collection set into free
803 // regions.
804 void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
805
806 // Abandon the current collection set without recording policy
807 // statistics or updating free lists.
808 void abandon_collection_set(G1CollectionSet* collection_set);
809
810 // The concurrent marker (and the thread it runs in.)
811 G1ConcurrentMark* _cm;
812 G1ConcurrentMarkThread* _cm_thread;
// Free regions plus regions the region manager could still make available.
1069 uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
1070
// Memory usage of the auxiliary (per-region bookkeeping) data,
// delegated to the heap region manager.
1071 MemoryUsage get_auxiliary_data_memory_usage() const {
1072 return _hrm->get_auxiliary_data_memory_usage();
1073 }
1074
1075 // The number of regions that are not completely free.
1076 uint num_used_regions() const { return num_regions() - num_free_regions(); }
1077
1078 #ifdef ASSERT
// Debug-only check: is the given region currently on the free list?
1079 bool is_on_master_free_list(HeapRegion* hr) {
1080 return _hrm->is_free(hr);
1081 }
1082 #endif // ASSERT
1083
// Maintain the old-region set as regions move in and out of it.
1084 inline void old_set_add(HeapRegion* hr);
1085 inline void old_set_remove(HeapRegion* hr);
1086
1087 inline void archive_set_add(HeapRegion* hr);
1088
// Capacity (in bytes) of all non-young regions: old + archive + humongous.
1089 size_t non_young_capacity_bytes() {
1090 return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
1091 }
1092
1093 // Determine whether the given region is one that we are using as an
1094 // old GC alloc region.
1095 bool is_old_gc_alloc_region(HeapRegion* hr);
1096
1097 // Perform a collection of the heap; intended for use in implementing
1098 // "System.gc". This probably implies as full a collection as the
1099 // "CollectedHeap" supports.
1100 virtual void collect(GCCause::Cause cause);
1101
1102 // Perform a collection of the heap with the given cause.
1103 // Returns whether this collection actually executed.
1104 bool try_collect(GCCause::Cause cause);
1105
1106 // True iff an evacuation has failed in the most-recent collection.
1107 bool evacuation_failed() { return _evacuation_failed; }
1108
// Remove the given counts of regions from the old/humongous sets.
1109 void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
// True if an allocation of word_size must be treated as humongous,
// i.e. strictly larger than the humongous threshold (see note below).
1267 static bool is_humongous(size_t word_size) {
1268 // Note this has to be strictly greater-than as the TLABs
1269 // are capped at the humongous threshold and we want to
1270 // ensure that we don't try to allocate a TLAB as
1271 // humongous and that we don't allocate a humongous
1272 // object in a TLAB.
1273 return word_size > _humongous_object_threshold_in_words;
1274 }
1275
1276 // Returns the humongous threshold for a specific region size
1277 static size_t humongous_threshold_for(size_t region_size) {
1278 return (region_size / 2);
1279 }
1280
1281 // Returns the number of regions the humongous object of the given word size
1282 // requires.
1283 static size_t humongous_obj_size_in_regions(size_t word_size);
1284
1285 // Print the maximum heap capacity.
1286 virtual size_t max_capacity() const;
1287
1288 // Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used.
1289 virtual size_t max_reserved_capacity() const;
1290
// Milliseconds since the last GC (semantics defined by the override target).
1291 virtual jlong millis_since_last_gc();
1292
1293
1294 // Convenience function to be used in situations where the heap type can be
1295 // asserted to be this type.
1296 static G1CollectedHeap* heap();
1297
// Mark the given region short-lived; caller presumably holds the
// appropriate lock ("_locked" suffix) — confirm against callers.
1298 void set_region_short_lived_locked(HeapRegion* hr);
1299 // add appropriate methods for any other surv rate groups
1300
// Accessor for the survivor region set.
1301 const G1SurvivorRegions* survivor() const { return &_survivor; }
1302
// Counts and used-bytes for eden/survivor regions, totals and per node_index.
1303 uint eden_regions_count() const { return _eden.length(); }
1304 uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
1305 uint survivor_regions_count() const { return _survivor.length(); }
1306 uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
1307 size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1308 size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1309 uint young_regions_count() const { return _eden.length() + _survivor.length(); }
|
549
// Accessor for the gang of parallel GC worker threads (_workers).
550 WorkGang* workers() const { return _workers; }
551
552 // Runs the given AbstractGangTask with the current active workers, returning the
553 // total time taken.
554 Tickspan run_task(AbstractGangTask* task);
555
// Accessor for the heap's allocation manager (_allocator).
556 G1Allocator* allocator() {
557 return _allocator;
558 }
559
// Accessor for the heap verifier (_verifier).
560 G1HeapVerifier* verifier() {
561 return _verifier;
562 }
563
// Accessor for the monitoring support object; asserts that it has
// been initialized before handing it out.
564 G1MonitoringSupport* g1mm() {
565 assert(_g1mm != NULL, "should have been initialized");
566 return _g1mm;
567 }
568
// Resize the heap at the end of a full collection.
569 void resize_heap_after_full_collection();
570
// Accessor for the NUMA support object (_numa).
571 G1NUMA* numa() const { return _numa; }
572
573 // Expand the garbage-first heap by at least the given size (in bytes!).
574 // Returns true if the heap was expanded by the requested amount;
575 // false otherwise.
576 // (Rounds up to a HeapRegion boundary.)
577 bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
// Expand by a single region for the given node_index (presumably a
// NUMA node index, matching numa() above — confirm against callers).
578 bool expand_single_region(uint node_index);
579
580 // Returns the PLAB statistics for a given destination.
581 inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);
582
583 // Determines PLAB size for a given destination.
584 inline size_t desired_plab_sz(G1HeapRegionAttr dest);
585
586 // Do anything common to GC's.
587 void gc_prologue(bool full);
588 void gc_epilogue(bool full);
589
772
// Verification hooks bracketing a young collection; the verify type
// selects which subset of checks G1HeapVerifier performs.
773 G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
774 void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
775 void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
776
// Select the regions forming the collection set for this pause,
// constrained by the given pause time goal.
777 void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);
778
779 // Actually do the work of evacuating the parts of the collection set.
780 void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states);
781 void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
782 private:
783 // Evacuate the next set of optional regions.
784 void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);
785
786 public:
// Setup/teardown around the evacuation phases above.
787 void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
788 void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
789 G1RedirtyCardsQueueSet* rdcqs,
790 G1ParScanThreadStateSet* pss);
791
// Resize the heap at the end of a young GC (may expand or shrink).
792 void resize_heap_after_young_gc();
793
794 // Update object copying statistics.
795 void record_obj_copy_mem_stats();
796
797 // The hot card cache for remembered set insertion optimization.
798 G1HotCardCache* _hot_card_cache;
799
800 // The g1 remembered set of the heap.
801 G1RemSet* _rem_set;
802
803 // After a collection pause, convert the regions in the collection set into free
804 // regions.
805 void free_collection_set(G1CollectionSet* collection_set, G1EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
806
807 // Abandon the current collection set without recording policy
808 // statistics or updating free lists.
809 void abandon_collection_set(G1CollectionSet* collection_set);
810
811 // The concurrent marker (and the thread it runs in.)
812 G1ConcurrentMark* _cm;
813 G1ConcurrentMarkThread* _cm_thread;
// Free regions plus regions the region manager could still make available.
1070 uint num_free_or_available_regions() const { return num_free_regions() + _hrm->available(); }
1071
// Memory usage of the auxiliary (per-region bookkeeping) data,
// delegated to the heap region manager.
1072 MemoryUsage get_auxiliary_data_memory_usage() const {
1073 return _hrm->get_auxiliary_data_memory_usage();
1074 }
1075
1076 // The number of regions that are not completely free.
1077 uint num_used_regions() const { return num_regions() - num_free_regions(); }
1078
1079 #ifdef ASSERT
// Debug-only check: is the given region currently on the free list?
1080 bool is_on_master_free_list(HeapRegion* hr) {
1081 return _hrm->is_free(hr);
1082 }
1083 #endif // ASSERT
1084
// Maintain the old-region set as regions move in and out of it.
1085 inline void old_set_add(HeapRegion* hr);
1086 inline void old_set_remove(HeapRegion* hr);
1087
1088 inline void archive_set_add(HeapRegion* hr);
1089
// Capacity (in bytes) of all non-young regions: old + archive + humongous.
1090 size_t non_young_capacity_bytes() const {
1091 return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
1092 }
1093
1094 // Determine whether the given region is one that we are using as an
1095 // old GC alloc region.
1096 bool is_old_gc_alloc_region(HeapRegion* hr);
1097
1098 // Perform a collection of the heap; intended for use in implementing
1099 // "System.gc". This probably implies as full a collection as the
1100 // "CollectedHeap" supports.
1101 virtual void collect(GCCause::Cause cause);
1102
1103 // Perform a collection of the heap with the given cause.
1104 // Returns whether this collection actually executed.
1105 bool try_collect(GCCause::Cause cause);
1106
1107 // True iff an evacuation has failed in the most-recent collection.
1108 bool evacuation_failed() { return _evacuation_failed; }
1109
// Remove the given counts of regions from the old/humongous sets.
1110 void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
// True if an allocation of word_size must be treated as humongous,
// i.e. strictly larger than the humongous threshold (see note below).
1268 static bool is_humongous(size_t word_size) {
1269 // Note this has to be strictly greater-than as the TLABs
1270 // are capped at the humongous threshold and we want to
1271 // ensure that we don't try to allocate a TLAB as
1272 // humongous and that we don't allocate a humongous
1273 // object in a TLAB.
1274 return word_size > _humongous_object_threshold_in_words;
1275 }
1276
1277 // Returns the humongous threshold for a specific region size
1278 static size_t humongous_threshold_for(size_t region_size) {
1279 return (region_size / 2);
1280 }
1281
1282 // Returns the number of regions the humongous object of the given word size
1283 // requires.
1284 static size_t humongous_obj_size_in_regions(size_t word_size);
1285
1286 // Print the maximum heap capacity.
1287 virtual size_t max_capacity() const;
// Minimum heap capacity (semantics defined by the override target).
1288 virtual size_t min_capacity() const;
1289
1290 // Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used.
1291 virtual size_t max_reserved_capacity() const;
1292
1293 // Print the soft maximum heap capacity.
1294 size_t soft_max_capacity() const;
1295
// Milliseconds since the last GC (semantics defined by the override target).
1296 virtual jlong millis_since_last_gc();
1297
1298
1299 // Convenience function to be used in situations where the heap type can be
1300 // asserted to be this type.
1301 static G1CollectedHeap* heap();
1302
// Mark the given region short-lived; caller presumably holds the
// appropriate lock ("_locked" suffix) — confirm against callers.
1303 void set_region_short_lived_locked(HeapRegion* hr);
1304 // add appropriate methods for any other surv rate groups
1305
// Accessor for the survivor region set.
1306 const G1SurvivorRegions* survivor() const { return &_survivor; }
1307
// Counts and used-bytes for eden/survivor regions, totals and per node_index.
1308 uint eden_regions_count() const { return _eden.length(); }
1309 uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
1310 uint survivor_regions_count() const { return _survivor.length(); }
1311 uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
1312 size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1313 size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1314 uint young_regions_count() const { return _eden.length() + _survivor.length(); }
|