
src/hotspot/share/gc/g1/g1CollectedHeap.hpp

rev 55208 : imported patch 8220089.webrev.0
rev 55209 : imported patch 8220089.webrev.1
rev 55210 : imported patch 8220089.webrev.2
rev 55211 : imported patch 8220089.webrev.3


 118   G1CollectedHeap* _g1h;
 119 public:
 120   G1STWSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 121   bool do_object_b(oop p);
 122 };
 123 
 124 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 125  private:
 126   void reset_from_card_cache(uint start_idx, size_t num_regions);
 127  public:
 128   virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 129 };
 130 
 131 class G1CollectedHeap : public CollectedHeap {
 132   friend class G1FreeCollectionSetTask;
 133   friend class VM_CollectForMetadataAllocation;
 134   friend class VM_G1CollectForAllocation;
 135   friend class VM_G1CollectFull;
 136   friend class VMStructs;
 137   friend class MutatorAllocRegion;
 138   friend class G1Allocator;
 139   friend class G1FullCollector;
 140   friend class G1GCAllocRegion;
 141   friend class G1HeapVerifier;
 142 
 143   // Closures used in implementation.
 144   friend class G1ParScanThreadState;
 145   friend class G1ParScanThreadStateSet;
 146   friend class G1EvacuateRegionsTask;
 147   friend class G1PLABAllocator;
 148 
 149   // Other related classes.
 150   friend class HeapRegionClaimer;
 151 
 152   // Testing classes.
 153   friend class G1CheckRegionAttrTableClosure;
 154 
 155 private:
 156   G1YoungRemSetSamplingThread* _young_gen_sampling_thread;
 157 
 158   WorkGang* _workers;


 443 
 444   // Second-level mutator allocation attempt: take the Heap_lock and
 445   // retry the allocation, potentially scheduling a GC pause. This
 446   // should only be used for non-humongous allocations.
 447   HeapWord* attempt_allocation_slow(size_t word_size);
 448 
 449   // Takes the Heap_lock and attempts a humongous allocation. It can
 450   // potentially schedule a GC pause.
 451   HeapWord* attempt_allocation_humongous(size_t word_size);
 452 
 453   // Allocation attempt that should be called during safepoints (e.g.,
 454   // at the end of a successful GC). expect_null_mutator_alloc_region
 455   // specifies whether the mutator alloc region is expected to be NULL
 456   // or not.
 457   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
 458                                             bool expect_null_mutator_alloc_region);
 459 
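The staged protocol above is easiest to see as a retry loop: a lock-free first-level attempt, then a locked second-level attempt that may schedule a GC pause between retries. Below is a minimal, self-contained C++ sketch of that shape; FakeHeap, try_fast_alloc, schedule_gc_pause and the retry bound are hypothetical illustrations, not the actual G1 code.

    #include <cstddef>
    #include <cstdint>
    #include <mutex>

    // Hypothetical stand-ins for illustration only; not the real G1 types.
    using HeapWord = std::uintptr_t;

    struct FakeHeap {
      std::mutex heap_lock;      // plays the role of Heap_lock
      size_t     free_words = 0;

      // Lock-free first-level attempt (grossly simplified stub).
      HeapWord* try_fast_alloc(size_t word_size) {
        if (free_words >= word_size) {
          free_words -= word_size;
          return (HeapWord*)1;   // fake non-null address
        }
        return nullptr;
      }

      // Pretend a GC pause reclaimed some space (stub).
      void schedule_gc_pause() { free_words += 1024; }

      // Second-level attempt: take the lock and retry, possibly scheduling
      // a GC pause between attempts -- the shape the comments above describe.
      HeapWord* attempt_allocation_slow(size_t word_size) {
        std::lock_guard<std::mutex> guard(heap_lock);
        for (int i = 0; i < 2; i++) {
          if (HeapWord* result = try_fast_alloc(word_size)) return result;
          schedule_gc_pause();   // real code coordinates this with the VM thread
        }
        return nullptr;          // give up; caller handles the failed allocation
      }
    };
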
 460   // These methods are the "callbacks" from the G1AllocRegion class.
 461 
 462   // For mutator alloc regions.
 463   void update_as_mutator_region(HeapRegion* alloc_region, bool is_reused);
 464   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
 465   void retire_mutator_alloc_region(HeapRegion* alloc_region,
 466                                    size_t allocated_bytes);
 467   void reuse_retained_survivor_region(HeapRegion* alloc_region);
 468 
 469   // For GC alloc regions.
 470   bool has_more_regions(G1HeapRegionAttr dest);
 471   HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest);
 472   void retire_gc_alloc_region(HeapRegion* alloc_region,
 473                               size_t allocated_bytes, G1HeapRegionAttr dest);
 474 
 475   void update_retained_survivor_gc_alloc_region(HeapRegion* alloc_region);
 476 
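For the GC alloc region callbacks above, the owning allocator drives a simple lifecycle: ask whether another region is available, obtain one, and retire it with its allocated byte count once full. A hedged sketch of that lifecycle under invented names follows; none of this is the actual G1AllocRegion API.

    #include <cstddef>

    // Hypothetical types illustrating the has_more / new / retire sequence
    // suggested by the declarations above; not the real G1 classes.
    struct Region {};

    struct FakeAllocRegionOwner {
      int regions_left = 4;

      bool has_more_regions() const { return regions_left > 0; }
      Region* new_alloc_region() { regions_left--; return new Region(); }
      void retire_alloc_region(Region* r, size_t allocated_bytes) {
        (void)allocated_bytes;   // real code records this for sizing policy
        delete r;                // hand the region back
      }

      // Typical use: retire a full region and replace it with a fresh one.
      Region* replace_full_region(Region* full, size_t allocated_bytes) {
        retire_alloc_region(full, allocated_bytes);
        return has_more_regions() ? new_alloc_region() : nullptr;
      }
    };
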
 477   // - if explicit_gc is true, the GC is for a System.gc() or similar,
 478   //   otherwise it's for a failed allocation.
 479   // - if clear_all_soft_refs is true, all soft references should be
 480   //   cleared during the GC.
 481   // - it returns false if it is unable to do the collection due to the
 482   //   GC locker being active, true otherwise.
 483   bool do_full_collection(bool explicit_gc,
 484                           bool clear_all_soft_refs);
 485 
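Given the contract spelled out above, a caller must distinguish "collection ran" from "GC locker blocked it". A minimal usage sketch under that contract; the stub body and the printed message are illustrative assumptions, not actual caller logic.

    #include <cstdio>

    // Hypothetical stub with the documented contract: returns false only
    // when the GC locker prevented the collection from running.
    static bool do_full_collection(bool explicit_gc, bool clear_all_soft_refs) {
      (void)explicit_gc; (void)clear_all_soft_refs;
      return true;   // stub: pretend the collection ran
    }

    int main() {
      // A failed allocation typically requests a non-explicit full GC that
      // clears soft references.
      if (!do_full_collection(/*explicit_gc=*/false, /*clear_all_soft_refs=*/true)) {
        // GC locker was active: nothing was collected, so the caller must
        // retry later or report the allocation failure.
        std::puts("full GC skipped: GC locker active");
      }
      return 0;
    }
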
 486   // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
 487   virtual void do_full_collection(bool clear_all_soft_refs);
 488 
 489   // Callback from VM_G1CollectForAllocation operation.
 490   // This function does everything necessary/possible to satisfy a
 491   // failed allocation request (including collection, expansion, etc.)
 492   HeapWord* satisfy_failed_allocation(size_t word_size,
 493                                       bool* succeeded);
 494   // Internal helpers used during full GC, splitting it up to
 495   // increase readability.
 496   void abort_concurrent_cycle();


1240   }
1241 
1242   // Returns the number of regions a humongous object of the given word
1243   // size requires.
1244   static size_t humongous_obj_size_in_regions(size_t word_size);
1245 
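The region count for a humongous object is a ceiling division of its size by the region size in words. A self-contained sketch of that calculation, where kRegionWords is an assumed placeholder for the real HeapRegion::GrainWords:

    #include <cassert>
    #include <cstddef>

    // Assumed placeholder for HeapRegion::GrainWords (e.g. 4 MB regions,
    // 8-byte words).
    static const size_t kRegionWords = (4u * 1024 * 1024) / 8;

    // A humongous object of word_size words occupies this many whole
    // regions: round the size up to the next region boundary.
    static size_t humongous_size_in_regions(size_t word_size) {
      assert(word_size > 0);
      return (word_size + kRegionWords - 1) / kRegionWords;
    }
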
1246   // Return the maximum heap capacity.
1247   virtual size_t max_capacity() const;
1248 
1249   // Return the size of reserved memory. Returns a different value than max_capacity() when AllocateOldGenAt is used.
1250   virtual size_t max_reserved_capacity() const;
1251 
1252   virtual jlong millis_since_last_gc();
1253 
1254 
1255   // Convenience function to be used in situations where the heap type can be
1256   // asserted to be this type.
1257   static G1CollectedHeap* heap();
1258 
1259   void set_region_short_lived_locked(HeapRegion* hr);
1260   // Used to reset a survivor region to an eden region.
1261   void set_retained_region_short_lived_locked(HeapRegion* hr);
1262   // add appropriate methods for any other surv rate groups
1263 
1264   const G1SurvivorRegions* survivor() const { return &_survivor; }
1265 
1266   // Returns the eden region count, excluding any retained regions.
1267   // At the end of GC, all retained regions have already been added to _eden, so
1268   // we have to subtract the retained region count to return only eden regions.
1269   uint eden_regions_count() const;
1270   // Returns the survivor region count, including any retained regions.
1271   // At the end of GC, all retained regions have already been removed from _survivor,
1272   // so we have to add the retained region count back to return all survivor regions.
1273   uint survivor_regions_count() const;
1274   // Returns the used bytes of mutator regions, excluding those of any retained region.
1275   size_t eden_regions_used_bytes() const;
1276   // Returns the used bytes of survivor regions, including those of any retained region.
1277   size_t survivor_regions_used_bytes() const;
1278   uint young_regions_count() const { return _eden.length() + _survivor.length(); }
1279   uint old_regions_count() const { return _old_set.length(); }
1280   uint archive_regions_count() const { return _archive_set.length(); }
1281   uint humongous_regions_count() const { return _humongous_set.length(); }
1282 
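Once a survivor region can be retained across a GC, the eden/survivor accessors above can no longer simply forward to length(). A hedged sketch of the accounting the comments describe, where _retained_count stands in for whatever bookkeeping the actual patch uses (it is an assumption, not taken from the patch):

    // Hypothetical set type; the real code uses G1EdenRegions / G1SurvivorRegions.
    struct FakeRegionSet {
      unsigned len = 0;
      unsigned length() const { return len; }
    };

    struct FakeHeapCounts {
      FakeRegionSet _eden;
      FakeRegionSet _survivor;
      unsigned _retained_count = 0;   // assumed bookkeeping field, see above

      // Retained regions were already added to _eden at the end of GC, so
      // subtract them to report only true eden regions.
      unsigned eden_regions_count() const {
        return _eden.length() - _retained_count;
      }

      // Retained regions were already removed from _survivor, so add them back.
      unsigned survivor_regions_count() const {
        return _survivor.length() + _retained_count;
      }
    };
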
1283   void convert_survivor_to_eden() { _survivor.convert_to_eden(); }
1284 
1285 #ifdef ASSERT
1286   bool check_young_list_empty();
1287 #endif
1288 
1289   // *** Stuff related to concurrent marking.  It's not clear to me that so
1290   // many of these need to be public.
1291 
1292   // The functions below are helper functions that a subclass of
1293   // "CollectedHeap" can use in the implementation of its virtual
1294   // functions.
1295   // This performs a concurrent marking of the live objects in a
1296   // bitmap off to the side.
1297   void do_concurrent_mark();
1298 
1299   bool is_marked_next(oop obj) const;
1300 
1301   // Determine if an object is dead, given the object and also
1302   // the region to which the object belongs. An object is dead
1303   // iff a) it was not allocated since the last mark, b) it

