src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

rev 6802 : imported patch refactor-heapregionseq
rev 6803 : imported patch bengt-review-1
rev 6804 : imported patch commit-uncommit-within-heap
rev 6805 : imported patch mikael-suggestions
rev 6806 : [mq]: bengt-suggestions

------- original version -------

 182   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
 183 public:
 184   OldGCAllocRegion()
 185   : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
 186 };
 187 
 188 // The G1 STW is-alive closure.
 189 // An instance is embedded into the G1CH and used as the
 190 // (optional) _is_alive_non_header closure in the STW
 191 // reference processor. It is also used extensively by
 192 // reference processing during STW evacuation pauses.
 193 class G1STWIsAliveClosure: public BoolObjectClosure {
 194   G1CollectedHeap* _g1;
 195 public:
 196   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
 197   bool do_object_b(oop p);
 198 };
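
For orientation, a minimal sketch of what do_object_b checks during a pause
(inferred from the comment above, not shown in this diff): an object outside
the collection set is trivially alive, and one inside it is alive only if it
has already been evacuated, i.e. forwarded.

  // Sketch only, assuming obj_in_cs() (declared later in this header)
  // and oopDesc::is_forwarded():
  bool G1STWIsAliveClosure::do_object_b(oop p) {
    // Outside the collection set: trivially alive during the pause.
    // Inside: alive iff it has already been copied (is forwarded).
    return !_g1->obj_in_cs(p) || p->is_forwarded();
  }
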
 199 
 200 class RefineCardTableEntryClosure;
 201 
 202 class G1CollectedHeap : public SharedHeap {
 203   friend class VM_CollectForMetadataAllocation;
 204   friend class VM_G1CollectForAllocation;
 205   friend class VM_G1CollectFull;
 206   friend class VM_G1IncCollectionPause;
 207   friend class VMStructs;
 208   friend class MutatorAllocRegion;
 209   friend class SurvivorGCAllocRegion;
 210   friend class OldGCAllocRegion;
 211 
 212   // Closures used in implementation.
 213   template <G1Barrier barrier, G1Mark do_mark_object>
 214   friend class G1ParCopyClosure;
 215   friend class G1IsAliveClosure;
 216   friend class G1EvacuateFollowersClosure;
 217   friend class G1ParScanThreadState;
 218   friend class G1ParScanClosureSuper;
 219   friend class G1ParEvacuateFollowersClosure;
 220   friend class G1ParTask;
 221   friend class G1FreeGarbageRegionClosure;


 256 
 257   // The block offset table for the G1 heap.
 258   G1BlockOffsetSharedArray* _bot_shared;
 259 
 260   // Tears down the region sets / lists so that they are empty and the
 261   // regions on the heap do not belong to a region set / list. The
 262   // only exception is the humongous set which we leave unaltered. If
 263   // free_list_only is true, it will only tear down the master free
 264   // list. It is called before a Full GC (free_list_only == false) or
 265   // before heap shrinking (free_list_only == true).
 266   void tear_down_region_sets(bool free_list_only);
 267 
 268   // Rebuilds the region sets / lists so that they are repopulated to
 269   // reflect the contents of the heap. The only exception is the
 270   // humongous set which was not torn down in the first place. If
 271   // free_list_only is true, it will only rebuild the master free
 272   // list. It is called after a Full GC (free_list_only == false) or
 273   // after heap shrinking (free_list_only == true).
 274   void rebuild_region_sets(bool free_list_only);
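
A hedged sketch of the pairing the two comments above describe (the
surrounding calls are illustrative, not taken from this patch):

  // Around a Full GC: tear down and rebuild everything.
  tear_down_region_sets(false /* free_list_only */);
  // ... perform the full collection / compaction ...
  rebuild_region_sets(false /* free_list_only */);

  // Around heap shrinking: only the master free list is affected.
  tear_down_region_sets(true /* free_list_only */);
  // ... shrink the committed space ...
  rebuild_region_sets(true /* free_list_only */);
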
 275 
 276   // The sequence of all heap regions in the heap.
 277   HeapRegionSeq _hrs;
 278 
 279   // Alloc region used to satisfy mutator allocation requests.
 280   MutatorAllocRegion _mutator_alloc_region;
 281 
 282   // Alloc region used to satisfy allocation requests by the GC for
 283   // survivor objects.
 284   SurvivorGCAllocRegion _survivor_gc_alloc_region;
 285 
 286   // PLAB sizing policy for survivors.
 287   PLABStats _survivor_plab_stats;
 288 
 289   // Alloc region used to satisfy allocation requests by the GC for
 290   // old objects.
 291   OldGCAllocRegion _old_gc_alloc_region;
 292 
 293   // PLAB sizing policy for tenured objects.
 294   PLABStats _old_plab_stats;
 295 


 827   // Abandon the current collection set without recording policy
 828   // statistics or updating free lists.
 829   void abandon_collection_set(HeapRegion* cs_head);
 830 
 831   // Applies "scan_non_heap_roots" to roots outside the heap,
 832   // "scan_rs" to roots inside the heap (having done "set_region" to
 833   // indicate the region in which the root resides),
 834   // and does "scan_metadata". If "scan_rs" is
 835   // NULL, then this step is skipped.  The "worker_i"
 836   // param is for use with parallel roots processing, and should be
 837   // the "i" of the calling parallel worker thread's work(i) function.
 838   // In the sequential case this param will be ignored.
 839   void g1_process_roots(OopClosure* scan_non_heap_roots,
 840                         OopClosure* scan_non_heap_weak_roots,
 841                         OopsInHeapRegionClosure* scan_rs,
 842                         CLDClosure* scan_strong_clds,
 843                         CLDClosure* scan_weak_clds,
 844                         CodeBlobClosure* scan_strong_code,
 845                         uint worker_i);
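
A hedged usage sketch for the "worker_i" parameter (the task and closure
names below are invented for illustration):

  // Each parallel worker forwards its own index from work():
  void MyRootScanTask::work(uint worker_i) {
    _g1h->g1_process_roots(&strong_oops,   // scan_non_heap_roots
                           &weak_oops,     // scan_non_heap_weak_roots
                           &scan_rs,       // NULL here would skip RS scanning
                           &strong_clds,
                           &weak_clds,
                           &strong_code,
                           worker_i);      // the "i" of this worker's work(i)
  }
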
 846 
 847   // Notifies all the necessary spaces that the committed space has
 848   // been updated (either expanded or shrunk). It should be called
 849   // after _g1_storage is updated.
 850   void update_committed_space(HeapWord* old_end, HeapWord* new_end);
 851 
 852   // The concurrent marker (and the thread it runs in.)
 853   ConcurrentMark* _cm;
 854   ConcurrentMarkThread* _cmThread;
 855   bool _mark_in_progress;
 856 
 857   // The concurrent refiner.
 858   ConcurrentG1Refine* _cg1r;
 859 
 860   // The parallel task queues
 861   RefToScanQueueSet *_task_queues;
 862 
 863   // True iff an evacuation has failed in the current collection.
 864   bool _evacuation_failed;
 865 
 866   EvacuationFailedInfo* _evacuation_failed_info_array;
 867 
 868   // Failed evacuations cause some logical from-space objects to have
 869   // forwarding pointers to themselves.  Reset them.
 870   void remove_self_forwarding_pointers();
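
A sketch of the invariant this relies on (assumed from the comment above,
not shown in the diff): an object whose evacuation failed is marked by
installing a forwarding pointer to itself.

  // Sketch only: detecting a failed-evacuation object.
  if (obj->is_forwarded() && obj->forwardee() == obj) {
    // Evacuation of obj failed; it stays in place, and
    // remove_self_forwarding_pointers() later restores its header.
  }
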
 871 


1262     return hr == _retained_old_gc_alloc_region;
1263   }
1264 
1265   // Perform a collection of the heap; intended for use in implementing
1266   // "System.gc".  This probably implies as full a collection as the
1267   // "CollectedHeap" supports.
1268   virtual void collect(GCCause::Cause cause);
1269 
1270   // The same as above but assume that the caller holds the Heap_lock.
1271   void collect_locked(GCCause::Cause cause);
1272 
1273   // True iff an evacuation has failed in the most-recent collection.
1274   bool evacuation_failed() { return _evacuation_failed; }
1275 
1276   void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, const HeapRegionSetCount& humongous_regions_removed);
1277   void prepend_to_freelist(FreeRegionList* list);
1278   void decrement_summary_bytes(size_t bytes);
1279 
1280   // Returns "TRUE" iff "p" points into the committed areas of the heap.
1281   virtual bool is_in(const void* p) const;
1282 
1283   // Return "TRUE" iff the given object address is within the collection
1284   // set. Slow implementation.
1285   inline bool obj_in_cs(oop obj);
1286 
1287   inline bool is_in_cset(oop obj);
1288 
1289   inline bool is_in_cset_or_humongous(const oop obj);
1290 
1291   enum in_cset_state_t {
1292    InNeither,           // neither in collection set nor humongous
1293    InCSet,              // region is in collection set only
1294    IsHumongous          // region is a humongous start region
1295   };
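
A hypothetical caller, to show how the three states are meant to be
dispatched (in_cset_state() is declared further down in this class):

  switch (g1h->in_cset_state(obj)) {
    case G1CollectedHeap::InCSet:      /* evacuate obj */             break;
    case G1CollectedHeap::IsHumongous: /* humongous-start handling */ break;
    case G1CollectedHeap::InNeither:   /* no copying needed */        break;
  }
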
1296  private:
1297   // Instances of this class are used for quick tests on whether a reference points
1298   // into the collection set or is a humongous object (points into a humongous
1299   // object).
1300   // Each of the array's elements denotes whether the corresponding region is in
1301   // the collection set or a humongous region.


1331   // the collection set or not. Each of the array's elements denotes whether the
1332   // corresponding region is in the collection set or not.
1333   G1FastCSetBiasedMappedArray _in_cset_fast_test;
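
The point of the biased mapped array, as a hedged sketch (the accessor name
below is an assumption about G1BiasedMappedArray-style types, not taken from
this diff): classifying a reference costs one shift and one load instead of
a HeapRegion lookup.

  // Conceptual fast-path test; get_by_address() is assumed here.
  //   in_cset_state_t state = _in_cset_fast_test.get_by_address(addr);
  //   if (state == InCSet) { /* reference points into the collection set */ }
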
1334 
1335  public:
1336 
1337   inline in_cset_state_t in_cset_state(const oop obj);
1338 
1339   // Return "TRUE" iff the given object address is in the reserved
1340   // region of g1.
1341   bool is_in_g1_reserved(const void* p) const {
1342     return _hrs.reserved().contains(p);
1343   }
1344 
1345   // Returns a MemRegion that corresponds to the space that has been
1346   // reserved for the heap
1347   MemRegion g1_reserved() const {
1348     return _hrs.reserved();
1349   }
1350 
1351   // Returns a MemRegion that corresponds to the space that has been
1352   // committed in the heap
1353   MemRegion g1_committed() {
1354     return _hrs.committed();
1355   }
1356 
1357   virtual bool is_in_closed_subset(const void* p) const;
1358 
1359   G1SATBCardTableModRefBS* g1_barrier_set() {
1360     return (G1SATBCardTableModRefBS*) barrier_set();
1361   }
1362 
1363   // This resets the card table to all zeros.  It is used after
1364   // a collection pause which used the card table to claim cards.
1365   void cleanUpCardTable();
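
Conceptually (a sketch only; in practice a parallel task walks the affected
regions), clearing the claimed cards amounts to:

  // Sketch: clear the card range covering region r back to clean.
  CardTableModRefBS* ct_bs = g1_barrier_set();
  ct_bs->clear(MemRegion(r->bottom(), r->end()));
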
1366 
1367   // Iteration functions.
1368 
1369   // Iterate over all the ref-containing fields of all objects, calling
1370   // "cl.do_oop" on each.
1371   virtual void oop_iterate(ExtendedOopClosure* cl);
1372 
1373   // Iterate over all objects, calling "cl.do_object" on each.
1374   virtual void object_iterate(ObjectClosure* cl);
1375 
1376   virtual void safe_object_iterate(ObjectClosure* cl) {
1377     object_iterate(cl);
1378   }
1379 
1380   // Iterate over all spaces in use in the heap, in ascending address order.

------- patched version (patches above applied) -------

 182   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
 183 public:
 184   OldGCAllocRegion()
 185   : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
 186 };
 187 
 188 // The G1 STW is-alive closure.
 189 // An instance is embedded into the G1CH and used as the
 190 // (optional) _is_alive_non_header closure in the STW
 191 // reference processor. It is also used extensively by
 192 // reference processing during STW evacuation pauses.
 193 class G1STWIsAliveClosure: public BoolObjectClosure {
 194   G1CollectedHeap* _g1;
 195 public:
 196   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
 197   bool do_object_b(oop p);
 198 };
 199 
 200 class RefineCardTableEntryClosure;
 201 
 202 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 203  private:
 204   void reset_from_card_cache(uint start_idx, size_t num_regions);
 205  public:
 206   virtual void on_commit(uint start_idx, size_t num_regions);
 207 };
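
A plausible reading of the new listener (a sketch; the method bodies are not
part of this hunk): committing regions invalidates their from-card cache
entries, so on_commit presumably just forwards to the private helper.

  // Sketch only; assumes this is all on_commit needs to do.
  void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
    // Newly committed regions may carry stale from-card cache state.
    reset_from_card_cache(start_idx, num_regions);
  }
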
 208 
 209 class G1CollectedHeap : public SharedHeap {
 210   friend class VM_CollectForMetadataAllocation;
 211   friend class VM_G1CollectForAllocation;
 212   friend class VM_G1CollectFull;
 213   friend class VM_G1IncCollectionPause;
 214   friend class VMStructs;
 215   friend class MutatorAllocRegion;
 216   friend class SurvivorGCAllocRegion;
 217   friend class OldGCAllocRegion;
 218 
 219   // Closures used in implementation.
 220   template <G1Barrier barrier, G1Mark do_mark_object>
 221   friend class G1ParCopyClosure;
 222   friend class G1IsAliveClosure;
 223   friend class G1EvacuateFollowersClosure;
 224   friend class G1ParScanThreadState;
 225   friend class G1ParScanClosureSuper;
 226   friend class G1ParEvacuateFollowersClosure;
 227   friend class G1ParTask;
 228   friend class G1FreeGarbageRegionClosure;


 263 
 264   // The block offset table for the G1 heap.
 265   G1BlockOffsetSharedArray* _bot_shared;
 266 
 267   // Tears down the region sets / lists so that they are empty and the
 268   // regions on the heap do not belong to a region set / list. The
 269   // only exception is the humongous set which we leave unaltered. If
 270   // free_list_only is true, it will only tear down the master free
 271   // list. It is called before a Full GC (free_list_only == false) or
 272   // before heap shrinking (free_list_only == true).
 273   void tear_down_region_sets(bool free_list_only);
 274 
 275   // Rebuilds the region sets / lists so that they are repopulated to
 276   // reflect the contents of the heap. The only exception is the
 277   // humongous set which was not torn down in the first place. If
 278   // free_list_only is true, it will only rebuild the master free
 279   // list. It is called after a Full GC (free_list_only == false) or
 280   // after heap shrinking (free_list_only == true).
 281   void rebuild_region_sets(bool free_list_only);
 282 
 283   // Callback for region mapping changed events.
 284   G1RegionMappingChangedListener _listener;
 285 
 286   // The sequence of all heap regions in the heap.
 287   HeapRegionSeq _hrs;
 288 
 289   // Alloc region used to satisfy mutator allocation requests.
 290   MutatorAllocRegion _mutator_alloc_region;
 291 
 292   // Alloc region used to satisfy allocation requests by the GC for
 293   // survivor objects.
 294   SurvivorGCAllocRegion _survivor_gc_alloc_region;
 295 
 296   // PLAB sizing policy for survivors.
 297   PLABStats _survivor_plab_stats;
 298 
 299   // Alloc region used to satisfy allocation requests by the GC for
 300   // old objects.
 301   OldGCAllocRegion _old_gc_alloc_region;
 302 
 303   // PLAB sizing policy for tenured objects.
 304   PLABStats _old_plab_stats;
 305 


 837   // Abandon the current collection set without recording policy
 838   // statistics or updating free lists.
 839   void abandon_collection_set(HeapRegion* cs_head);
 840 
 841   // Applies "scan_non_heap_roots" to roots outside the heap,
 842   // "scan_rs" to roots inside the heap (having done "set_region" to
 843   // indicate the region in which the root resides),
 844   // and does "scan_metadata". If "scan_rs" is
 845   // NULL, then this step is skipped.  The "worker_i"
 846   // param is for use with parallel roots processing, and should be
 847   // the "i" of the calling parallel worker thread's work(i) function.
 848   // In the sequential case this param will be ignored.
 849   void g1_process_roots(OopClosure* scan_non_heap_roots,
 850                         OopClosure* scan_non_heap_weak_roots,
 851                         OopsInHeapRegionClosure* scan_rs,
 852                         CLDClosure* scan_strong_clds,
 853                         CLDClosure* scan_weak_clds,
 854                         CodeBlobClosure* scan_strong_code,
 855                         uint worker_i);
 856 
 857   // The concurrent marker (and the thread it runs in.)
 858   ConcurrentMark* _cm;
 859   ConcurrentMarkThread* _cmThread;
 860   bool _mark_in_progress;
 861 
 862   // The concurrent refiner.
 863   ConcurrentG1Refine* _cg1r;
 864 
 865   // The parallel task queues
 866   RefToScanQueueSet *_task_queues;
 867 
 868   // True iff an evacuation has failed in the current collection.
 869   bool _evacuation_failed;
 870 
 871   EvacuationFailedInfo* _evacuation_failed_info_array;
 872 
 873   // Failed evacuations cause some logical from-space objects to have
 874   // forwarding pointers to themselves.  Reset them.
 875   void remove_self_forwarding_pointers();
 876 


1267     return hr == _retained_old_gc_alloc_region;
1268   }
1269 
1270   // Perform a collection of the heap; intended for use in implementing
1271   // "System.gc".  This probably implies as full a collection as the
1272   // "CollectedHeap" supports.
1273   virtual void collect(GCCause::Cause cause);
1274 
1275   // The same as above but assume that the caller holds the Heap_lock.
1276   void collect_locked(GCCause::Cause cause);
1277 
1278   // True iff an evacuation has failed in the most-recent collection.
1279   bool evacuation_failed() { return _evacuation_failed; }
1280 
1281   void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, const HeapRegionSetCount& humongous_regions_removed);
1282   void prepend_to_freelist(FreeRegionList* list);
1283   void decrement_summary_bytes(size_t bytes);
1284 
1285   // Returns "TRUE" iff "p" points into the committed areas of the heap.
1286   virtual bool is_in(const void* p) const;
1287 #ifdef ASSERT
1288   // Returns whether p is in one of the available areas of the heap. Slow but
1289   // extensive version.
1290   bool is_in_exact(const void* p) const;
1291 #endif
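
A hedged example of where the new debug-only predicate would fit (the assert
message is invented for the sketch):

  #ifdef ASSERT
    // Exact-but-slow containment check, acceptable in verification code.
    assert(g1h->is_in_exact(p), "p not in an available region of the heap");
  #endif
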
1292 
1293   // Return "TRUE" iff the given object address is within the collection
1294   // set. Slow implementation.
1295   inline bool obj_in_cs(oop obj);
1296 
1297   inline bool is_in_cset(oop obj);
1298 
1299   inline bool is_in_cset_or_humongous(const oop obj);
1300 
1301   enum in_cset_state_t {
1302    InNeither,           // neither in collection set nor humongous
1303    InCSet,              // region is in collection set only
1304    IsHumongous          // region is a humongous start region
1305   };
1306  private:
1307   // Instances of this class are used for quick tests on whether a reference points
1308   // into the collection set or is a humongous object (points into a humongous
1309   // object).
1310   // Each of the array's elements denotes whether the corresponding region is in
1311   // the collection set or a humongous region.


1341   // the collection set or not. Each of the array's elements denotes whether the
1342   // corresponding region is in the collection set or not.
1343   G1FastCSetBiasedMappedArray _in_cset_fast_test;
1344 
1345  public:
1346 
1347   inline in_cset_state_t in_cset_state(const oop obj);
1348 
1349   // Return "TRUE" iff the given object address is in the reserved
1350   // region of g1.
1351   bool is_in_g1_reserved(const void* p) const {
1352     return _hrs.reserved().contains(p);
1353   }
1354 
1355   // Returns a MemRegion that corresponds to the space that has been
1356   // reserved for the heap
1357   MemRegion g1_reserved() const {
1358     return _hrs.reserved();
1359   }
1360 
1361   virtual bool is_in_closed_subset(const void* p) const;
1362 
1363   G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
1364     return (G1SATBCardTableLoggingModRefBS*) barrier_set();
1365   }
1366 
1367   // This resets the card table to all zeros.  It is used after
1368   // a collection pause which used the card table to claim cards.
1369   void cleanUpCardTable();
1370 
1371   // Iteration functions.
1372 
1373   // Iterate over all the ref-containing fields of all objects, calling
1374   // "cl.do_oop" on each.
1375   virtual void oop_iterate(ExtendedOopClosure* cl);
1376 
1377   // Iterate over all objects, calling "cl.do_object" on each.
1378   virtual void object_iterate(ObjectClosure* cl);
1379 
1380   virtual void safe_object_iterate(ObjectClosure* cl) {
1381     object_iterate(cl);
1382   }
1383 
1384   // Iterate over all spaces in use in the heap, in ascending address order.