
src/hotspot/share/gc/g1/g1CollectedHeap.hpp

rev 60257 : [mq]: 8248401-unify-millis-since-last-gc


 142   friend class G1HeapVerifier;
 143 
 144   // Closures used in implementation.
 145   friend class G1ParScanThreadState;
 146   friend class G1ParScanThreadStateSet;
 147   friend class G1EvacuateRegionsTask;
 148   friend class G1PLABAllocator;
 149 
 150   // Other related classes.
 151   friend class HeapRegionClaimer;
 152 
 153   // Testing classes.
 154   friend class G1CheckRegionAttrTableClosure;
 155 
 156 private:
 157   G1YoungRemSetSamplingThread* _young_gen_sampling_thread;
 158 
 159   WorkGang* _workers;
 160   G1CardTable* _card_table;
 161 


 162   SoftRefPolicy      _soft_ref_policy;
 163 
 164   static size_t _humongous_object_threshold_in_words;
 165 
 166   // These sets keep track of old, archive and humongous regions respectively.
 167   HeapRegionSet _old_set;
 168   HeapRegionSet _archive_set;
 169   HeapRegionSet _humongous_set;
 170 
 171   void eagerly_reclaim_humongous_regions();
 172   // Start a new incremental collection set for the next pause.
 173   void start_new_collection_set();
 174 
 175   // The block offset table for the G1 heap.
 176   G1BlockOffsetTable* _bot;
 177 
 178   // Tears down the region sets / lists so that they are empty and the
 179   // regions on the heap do not belong to a region set / list. The
 180   // only exception is the humongous set which we leave unaltered. If
 181   // free_list_only is true, it will only tear down the master free


 627   bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
 628 
 629   // This is called at the start of either a concurrent cycle or a Full
 630   // GC to update the number of old marking cycles started.
 631   void increment_old_marking_cycles_started();
 632 
 633   // This is called at the end of either a concurrent cycle or a Full
 634   // GC to update the number of old marking cycles completed. Those two
 635   // can happen in a nested fashion, i.e., we start a concurrent
 636   // cycle, a Full GC happens half-way through it which ends first,
 637   // and then the cycle notices that a Full GC happened and ends
 638   // too. The concurrent parameter is a boolean that allows tighter
 639   // consistency checking in the method. If concurrent is
 640   // false, the caller is the inner caller in the nesting (i.e., the
 641   // Full GC). If concurrent is true, the caller is the outer caller
 642   // in this nesting (i.e., the concurrent cycle). Further nesting is
 643   // not currently supported. The end of this call also notifies
 644   // the G1OldGCCount_lock in case a Java thread is waiting for a full
 645   // GC to happen (e.g., it called System.gc() with
 646   // +ExplicitGCInvokesConcurrent).
 647   void increment_old_marking_cycles_completed(bool concurrent);


 648 
 649   uint old_marking_cycles_completed() {
 650     return _old_marking_cycles_completed;
 651   }
 652 
 653   G1HRPrinter* hr_printer() { return &_hr_printer; }
 654 
 655   // Allocates a new heap region instance.
 656   HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
 657 
 658   // Allocate the highest free region in the reserved heap. This will commit
 659   // regions as necessary.
 660   HeapRegion* alloc_highest_free_region();
 661 
 662   // Frees a region by resetting its metadata and adding it to the free list
 663   // passed as a parameter (this is usually a local list which will be appended
 664   // to the master free list later or NULL if free list management is handled
 665   // in another way).
 666   // Callers must ensure they are the only ones calling free on the given region
 667   // at the same time.
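
The nesting described in the comment on increment_old_marking_cycles_completed()
above can be sketched as a call ordering. This is not an actual call site, just
the sequence the comment implies when a Full GC interrupts a concurrent cycle:

    increment_old_marking_cycles_started();        // outer: concurrent cycle begins
    increment_old_marking_cycles_started();        // inner: a Full GC starts half-way through
    increment_old_marking_cycles_completed(false); // inner: the Full GC ends first
    increment_old_marking_cycles_completed(true);  // outer: the cycle notices and ends too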


1271     // humongous and that we don't allocate a humongous
1272     // object in a TLAB.
1273     return word_size > _humongous_object_threshold_in_words;
1274   }
1275 
1276   // Returns the humongous threshold for a specific region size
1277   static size_t humongous_threshold_for(size_t region_size) {
1278     return (region_size / 2);
1279   }
1280 
1281   // Returns the number of regions a humongous object of the given word size
1282   // requires.
1283   static size_t humongous_obj_size_in_regions(size_t word_size);
1284 
1285   // Return the maximum heap capacity.
1286   virtual size_t max_capacity() const;
1287 
1288   // Return the size of reserved memory. Returns a different value from max_capacity() when AllocateOldGenAt is used.
1289   virtual size_t max_reserved_capacity() const;
1290 
1291   virtual jlong millis_since_last_gc();
1292 
1293 
1294   // Convenience function to be used in situations where the heap type can be
1295   // asserted to be this type.
1296   static G1CollectedHeap* heap() {
1297     return named_heap<G1CollectedHeap>(CollectedHeap::G1);
1298   }
1299 
1300   void set_region_short_lived_locked(HeapRegion* hr);
1301   // add appropriate methods for any other surv rate groups
1302 
1303   const G1SurvivorRegions* survivor() const { return &_survivor; }
1304 
1305   uint eden_regions_count() const { return _eden.length(); }
1306   uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
1307   uint survivor_regions_count() const { return _survivor.length(); }
1308   uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
1309   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1310   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1311   uint young_regions_count() const { return _eden.length() + _survivor.length(); }
1312   uint old_regions_count() const { return _old_set.length(); }
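
As a worked example of the humongous threshold arithmetic above, assume 4 MB
regions and 8-byte heap words (both values are illustrative):

    region_size                     = 4 MB = 524288 words
    humongous_threshold_for(524288) = 262144 words (2 MB)
    a 3 MB (393216-word) request:   393216 > 262144, so it is humongous
    humongous_obj_size_in_regions(393216) then covers ceil(3 MB / 4 MB) = 1 region,
    assuming it rounds the object size up to whole regions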




 142   friend class G1HeapVerifier;
 143 
 144   // Closures used in implementation.
 145   friend class G1ParScanThreadState;
 146   friend class G1ParScanThreadStateSet;
 147   friend class G1EvacuateRegionsTask;
 148   friend class G1PLABAllocator;
 149 
 150   // Other related classes.
 151   friend class HeapRegionClaimer;
 152 
 153   // Testing classes.
 154   friend class G1CheckRegionAttrTableClosure;
 155 
 156 private:
 157   G1YoungRemSetSamplingThread* _young_gen_sampling_thread;
 158 
 159   WorkGang* _workers;
 160   G1CardTable* _card_table;
 161 
 162   Ticks _collection_pause_end;
 163 
 164   SoftRefPolicy      _soft_ref_policy;
 165 
 166   static size_t _humongous_object_threshold_in_words;
 167 
 168   // These sets keep track of old, archive and humongous regions respectively.
 169   HeapRegionSet _old_set;
 170   HeapRegionSet _archive_set;
 171   HeapRegionSet _humongous_set;
 172 
 173   void eagerly_reclaim_humongous_regions();
 174   // Start a new incremental collection set for the next pause.
 175   void start_new_collection_set();
 176 
 177   // The block offset table for the G1 heap.
 178   G1BlockOffsetTable* _bot;
 179 
 180   // Tears down the region sets / lists so that they are empty and the
 181   // regions on the heap do not belong to a region set / list. The
 182   // only exception is the humongous set which we leave unaltered. If
 183   // free_list_only is true, it will only tear down the master free


 629   bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
 630 
 631   // This is called at the start of either a concurrent cycle or a Full
 632   // GC to update the number of old marking cycles started.
 633   void increment_old_marking_cycles_started();
 634 
 635   // This is called at the end of either a concurrent cycle or a Full
 636   // GC to update the number of old marking cycles completed. Those two
 637   // can happen in a nested fashion, i.e., we start a concurrent
 638   // cycle, a Full GC happens half-way through it which ends first,
 639   // and then the cycle notices that a Full GC happened and ends
 640   // too. The concurrent parameter is a boolean that allows tighter
 641   // consistency checking in the method. If concurrent is
 642   // false, the caller is the inner caller in the nesting (i.e., the
 643   // Full GC). If concurrent is true, the caller is the outer caller
 644   // in this nesting (i.e., the concurrent cycle). Further nesting is
 645   // not currently supported. The end of this call also notifies
 646   // the G1OldGCCount_lock in case a Java thread is waiting for a full
 647   // GC to happen (e.g., it called System.gc() with
 648   // +ExplicitGCInvokesConcurrent).
 649   // liveness_completed should indicate whether, during that old marking
 650   // cycle, the whole heap has actually been examined for live objects.
 651   void increment_old_marking_cycles_completed(bool concurrent, bool liveness_completed);
 652 
 653   uint old_marking_cycles_completed() {
 654     return _old_marking_cycles_completed;
 655   }
 656 
 657   G1HRPrinter* hr_printer() { return &_hr_printer; }
 658 
 659   // Allocates a new heap region instance.
 660   HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
 661 
 662   // Allocate the highest free region in the reserved heap. This will commit
 663   // regions as necessary.
 664   HeapRegion* alloc_highest_free_region();
 665 
 666   // Frees a region by resetting its metadata and adding it to the free list
 667   // passed as a parameter (this is usually a local list which will be appended
 668   // to the master free list later or NULL if free list management is handled
 669   // in another way).
 670   // Callers must ensure they are the only ones calling free on the given region
 671   // at the same time.
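
For the waiting side mentioned in the comment on
increment_old_marking_cycles_completed() (a Java thread blocked until a cycle
completes), here is a minimal sketch of the pattern, loosely modeled on
G1CollectedHeap::collect and assuming HotSpot's MonitorLocker helper:

    MonitorLocker ml(G1OldGCCount_lock);
    uint completed_before = old_marking_cycles_completed();
    // ... request the collection / concurrent cycle ...
    while (old_marking_cycles_completed() == completed_before) {
      ml.wait(); // notified by increment_old_marking_cycles_completed()
    }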


1275     // humongous and that we don't allocate a humongous
1276     // object in a TLAB.
1277     return word_size > _humongous_object_threshold_in_words;
1278   }
1279 
1280   // Returns the humongous threshold for a specific region size
1281   static size_t humongous_threshold_for(size_t region_size) {
1282     return (region_size / 2);
1283   }
1284 
1285   // Returns the number of regions a humongous object of the given word size
1286   // requires.
1287   static size_t humongous_obj_size_in_regions(size_t word_size);
1288 
1289   // Return the maximum heap capacity.
1290   virtual size_t max_capacity() const;
1291 
1292   // Return the size of reserved memory. Returns a different value from max_capacity() when AllocateOldGenAt is used.
1293   virtual size_t max_reserved_capacity() const;
1294 
1295   Tickspan time_since_last_collection() const { return Ticks::now() - _collection_pause_end; }

1296 
1297   // Convenience function to be used in situations where the heap type can be
1298   // asserted to be this type.
1299   static G1CollectedHeap* heap() {
1300     return named_heap<G1CollectedHeap>(CollectedHeap::G1);
1301   }
1302 
1303   void set_region_short_lived_locked(HeapRegion* hr);
1304   // add appropriate methods for any other surv rate groups
1305 
1306   const G1SurvivorRegions* survivor() const { return &_survivor; }
1307 
1308   uint eden_regions_count() const { return _eden.length(); }
1309   uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
1310   uint survivor_regions_count() const { return _survivor.length(); }
1311   uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
1312   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1313   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1314   uint young_regions_count() const { return _eden.length() + _survivor.length(); }
1315   uint old_regions_count() const { return _old_set.length(); }
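
time_since_last_collection() is the replacement for the jlong-based
millis_since_last_gc(): _collection_pause_end is presumably stamped with
Ticks::now() at the end of each pause, and callers derive whatever unit they
need from the returned Tickspan. A hedged sketch of a consumer, loosely modeled
on the periodic GC check (G1PeriodicGCInterval is G1's existing flag; the
milliseconds() accessor on Tickspan is assumed here):

    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    jlong since_last_ms = g1h->time_since_last_collection().milliseconds();
    if (since_last_ms >= (jlong)G1PeriodicGCInterval) {
      // enough idle time has passed; consider scheduling a periodic GC
    }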

