src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

rev 6670 : fast reclaim main patch


 181   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
 182   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
 183 public:
 184   OldGCAllocRegion()
 185   : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
 186 };
 187 
 188 // The G1 STW is alive closure.
 189 // An instance is embedded into the G1CH and used as the
 190 // (optional) _is_alive_non_header closure in the STW
 191 // reference processor. It is also extensively used during
 192 // reference processing during STW evacuation pauses.
 193 class G1STWIsAliveClosure: public BoolObjectClosure {
 194   G1CollectedHeap* _g1;
 195 public:
 196   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
 197   bool do_object_b(oop p);
 198 };
 199 
 200 // Instances of this class are used for quick tests on whether a reference points
 201 // into the collection set. Each of the array's elements denotes whether the
 202 // corresponding region is in the collection set.
 203 class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<bool> {
 204  protected:
 205   bool default_value() const { return false; }
 206  public:
 207   void clear() { G1BiasedMappedArray<bool>::clear(); }
 208 };
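
For context, a minimal illustrative fragment of how this boolean table is populated and queried; `region` and `obj` are hypothetical locals, and the real callers are register_region_with_in_cset_fast_test() and in_cset_fast_test() further down in this file:

    // Hypothetical fragment, assuming the surrounding HotSpot types; the
    // array itself is initialized elsewhere to cover the whole heap.
    G1FastCSetBiasedMappedArray in_cset_fast_test;
    in_cset_fast_test.set_by_index(region->hrs_index(), true);  // region joins the cset
    bool needs_copy = in_cset_fast_test.get_by_address((HeapWord*)obj);
    in_cset_fast_test.clear();                                   // reset after the pause
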
 209 
 210 class RefineCardTableEntryClosure;
 211 
 212 class G1CollectedHeap : public SharedHeap {
 213   friend class VM_CollectForMetadataAllocation;
 214   friend class VM_G1CollectForAllocation;
 215   friend class VM_G1CollectFull;
 216   friend class VM_G1IncCollectionPause;
 217   friend class VMStructs;
 218   friend class MutatorAllocRegion;
 219   friend class SurvivorGCAllocRegion;
 220   friend class OldGCAllocRegion;
 221 
 222   // Closures used in implementation.
 223   template <G1Barrier barrier, G1Mark do_mark_object>
 224   friend class G1ParCopyClosure;
 225   friend class G1IsAliveClosure;
 226   friend class G1EvacuateFollowersClosure;
 227   friend class G1ParScanThreadState;
 228   friend class G1ParScanClosureSuper;
 229   friend class G1ParEvacuateFollowersClosure;
 230   friend class G1ParTask;
 231   friend class G1FreeGarbageRegionClosure;
 232   friend class RefineCardTableEntryClosure;
 233   friend class G1PrepareCompactClosure;
 234   friend class RegionSorter;
 235   friend class RegionResetter;
 236   friend class CountRCClosure;
 237   friend class EvacPopObjClosure;
 238   friend class G1ParCleanupCTTask;
 239 

 240   // Other related classes.
 241   friend class G1MarkSweep;
 242 
 243 private:
 244   // The one and only G1CollectedHeap, so static functions can find it.
 245   static G1CollectedHeap* _g1h;
 246 
 247   static size_t _humongous_object_threshold_in_words;
 248 
 249   // Storage for the G1 heap.
 250   VirtualSpace _g1_storage;
 251   MemRegion    _g1_reserved;
 252 
 253   // The part of _g1_storage that is currently committed.
 254   MemRegion _g1_committed;
 255 
 256   // The master free list. It will satisfy all new region allocations.
 257   FreeRegionList _free_list;
 258 
 259   // The secondary free list which contains regions that have been
 260   // freed up during the cleanup process. This will be appended to the
 261   // master free list when appropriate.
 262   FreeRegionList _secondary_free_list;
 263 
 264   // It keeps track of the old regions.
 265   HeapRegionSet _old_set;
 266 
 267   // It keeps track of the humongous regions.
 268   HeapRegionSet _humongous_set;
 269 



 270   // The number of regions we could create by expansion.
 271   uint _expansion_regions;
 272 
 273   // The block offset table for the G1 heap.
 274   G1BlockOffsetSharedArray* _bot_shared;
 275 
 276   // Tears down the region sets / lists so that they are empty and the
 277   // regions on the heap do not belong to a region set / list. The
 278   // only exception is the humongous set which we leave unaltered. If
 279   // free_list_only is true, it will only tear down the master free
 280   // list. It is called before a Full GC (free_list_only == false) or
 281   // before heap shrinking (free_list_only == true).
 282   void tear_down_region_sets(bool free_list_only);
 283 
 284   // Rebuilds the region sets / lists so that they are repopulated to
 285   // reflect the contents of the heap. The only exception is the
 286   // humongous set which was not torn down in the first place. If
 287   // free_list_only is true, it will only rebuild the master free
 288   // list. It is called after a Full GC (free_list_only == false) or
 289   // after heap shrinking (free_list_only == true).


 355 
 356   // It does any cleanup that needs to be done on the GC alloc regions
 357   // before a Full GC.
 358   void abandon_gc_alloc_regions();
 359 
 360   // Helper for monitoring and management support.
 361   G1MonitoringSupport* _g1mm;
 362 
 363   // Determines PLAB size for a particular allocation purpose.
 364   size_t desired_plab_sz(GCAllocPurpose purpose);
 365 
 366   // Outside of GC pauses, the number of bytes used in all regions other
 367   // than the current allocation region.
 368   size_t _summary_bytes_used;
 369 
 370   // This array is used for a quick test on whether a reference points into
 371   // the collection set or not. Each of the array's elements denotes whether the
 372   // corresponding region is in the collection set or not.
 373   G1FastCSetBiasedMappedArray _in_cset_fast_test;
 374 
 375   volatile unsigned _gc_time_stamp;
 376 
 377   size_t* _surviving_young_words;
 378 
 379   G1HRPrinter _hr_printer;
 380 
 381   void setup_surviving_young_words();
 382   void update_surviving_young_words(size_t* surv_young_words);
 383   void cleanup_surviving_young_words();
 384 
 385   // It decides whether an explicit GC should start a concurrent cycle
 386   // instead of doing a STW GC. Currently, a concurrent cycle is
 387   // explicitly started if:
 388   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
 389   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
 390   // (c) cause == _g1_humongous_allocation
 391   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 392 
 393   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 394   // concurrent cycles) we have started.


 673   // after processing.
 674   void enqueue_discovered_references(uint no_of_gc_workers);
 675 
 676 public:
 677 
 678   G1MonitoringSupport* g1mm() {
 679     assert(_g1mm != NULL, "should have been initialized");
 680     return _g1mm;
 681   }
 682 
 683   // Expand the garbage-first heap by at least the given size (in bytes!).
 684   // Returns true if the heap was expanded by the requested amount;
 685   // false otherwise.
 686   // (Rounds up to a HeapRegion boundary.)
 687   bool expand(size_t expand_bytes);
 688 
 689   // Do anything common to GC's.
 690   virtual void gc_prologue(bool full);
 691   virtual void gc_epilogue(bool full);
 692 
 693   // We register a region with the fast "in collection set" test. We
 694   // simply set to true the array slot corresponding to this region.
 695   void register_region_with_in_cset_fast_test(HeapRegion* r) {
 696     _in_cset_fast_test.set_by_index(r->hrs_index(), true);
 697   }
 698 
 699   // This is a fast test on whether a reference points into the
 700   // collection set or not. Assume that the reference
 701   // points into the heap.
 702   inline bool in_cset_fast_test(oop obj);
 703 
 704   void clear_cset_fast_test() {
 705     _in_cset_fast_test.clear();
 706   }
 707 
 708   // This is called at the start of either a concurrent cycle or a Full
 709   // GC to update the number of old marking cycles started.
 710   void increment_old_marking_cycles_started();
 711 
 712   // This is called at the end of either a concurrent cycle or a Full
 713   // GC to update the number of old marking cycles completed. Those two
 714   // can happen in a nested fashion, i.e., we start a concurrent
 715   // cycle, a Full GC happens half-way through it which ends first,
 716   // and then the cycle notices that a Full GC happened and ends


1266 
1267   // Perform a collection of the heap; intended for use in implementing
1268   // "System.gc".  This probably implies as full a collection as the
1269   // "CollectedHeap" supports.
1270   virtual void collect(GCCause::Cause cause);
1271 
1272   // The same as above but assume that the caller holds the Heap_lock.
1273   void collect_locked(GCCause::Cause cause);
1274 
1275   // True iff an evacuation has failed in the most-recent collection.
1276   bool evacuation_failed() { return _evacuation_failed; }
1277 
1278   void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, const HeapRegionSetCount& humongous_regions_removed);
1279   void prepend_to_freelist(FreeRegionList* list);
1280   void decrement_summary_bytes(size_t bytes);
1281 
1282   // Returns "TRUE" iff "p" points into the committed areas of the heap.
1283   virtual bool is_in(const void* p) const;
1284 
1285   // Return "TRUE" iff the given object address is within the collection
1286   // set.
1287   inline bool obj_in_cs(oop obj);
1288 
1289   // Return "TRUE" iff the given object address is in the reserved
1290   // region of g1.
1291   bool is_in_g1_reserved(const void* p) const {
1292     return _g1_reserved.contains(p);
1293   }
1294 
1295   // Returns a MemRegion that corresponds to the space that has been
1296   // reserved for the heap
1297   MemRegion g1_reserved() {
1298     return _g1_reserved;
1299   }
1300 
1301   // Returns a MemRegion that corresponds to the space that has been
1302   // committed in the heap
1303   MemRegion g1_committed() {
1304     return _g1_committed;
1305   }
1306 
1307   virtual bool is_in_closed_subset(const void* p) const;
1308 


1323   // Same as above, restricted to a memory region.
1324   void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
1325 
1326   // Iterate over all objects, calling "cl.do_object" on each.
1327   virtual void object_iterate(ObjectClosure* cl);
1328 
1329   virtual void safe_object_iterate(ObjectClosure* cl) {
1330     object_iterate(cl);
1331   }
1332 
1333   // Iterate over all spaces in use in the heap, in ascending address order.
1334   virtual void space_iterate(SpaceClosure* cl);
1335 
1336   // Iterate over heap regions, in address order, terminating the
1337   // iteration early if the "doHeapRegion" method returns "true".
1338   void heap_region_iterate(HeapRegionClosure* blk) const;
1339 
1340   // Return the region with the given index. It assumes the index is valid.
1341   inline HeapRegion* region_at(uint index) const;
1342 
1343   // Divide the heap region sequence into "chunks" of some size (the number
1344   // of regions divided by the number of parallel threads times some
1345   // overpartition factor, currently 4).  Assumes that this will be called
1346   // in parallel by ParallelGCThreads worker threads with distinct worker
1347   // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1348   // calls will use the same "claim_value", and that that claim value is
1349   // different from the claim_value of any heap region before the start of
1350   // the iteration.  Applies "blk->doHeapRegion" to each of the regions, by
1351   // attempting to claim the first region in each chunk, and, if
1352   // successful, applying the closure to each region in the chunk (and
1353   // setting the claim value of the second and subsequent regions of the
1354   // chunk.)  For now requires that "doHeapRegion" always returns "false",
1355   // i.e., that a closure never attempt to abort a traversal.
1356   void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1357                                        uint worker,
1358                                        uint no_of_par_workers,
1359                                        jint claim_value);
1360 
1361   // It resets all the region claim values to the default.
1362   void reset_heap_region_claim_values();




 181   virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
 182   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
 183 public:
 184   OldGCAllocRegion()
 185   : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
 186 };
 187 
 188 // The G1 STW is alive closure.
 189 // An instance is embedded into the G1CH and used as the
 190 // (optional) _is_alive_non_header closure in the STW
 191 // reference processor. It is also extensively used during
 192 // reference processing during STW evacuation pauses.
 193 class G1STWIsAliveClosure: public BoolObjectClosure {
 194   G1CollectedHeap* _g1;
 195 public:
 196   G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
 197   bool do_object_b(oop p);
 198 };
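
The header only declares do_object_b(); a hedged sketch of what the predicate plausibly checks, given the comment above (the actual body is in g1CollectedHeap.cpp):

    // Sketch only: during a STW evacuation pause an object is treated as
    // alive if it is outside the collection set, or it is inside the
    // collection set but has already been copied (forwarded).
    bool G1STWIsAliveClosure::do_object_b(oop p) {
      return !_g1->obj_in_cs(p) || p->is_forwarded();
    }
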
 199 
 200 // Instances of this class are used for quick tests on whether a reference points
 201 // into the collection set or into a humongous object (i.e. points into a
 202 // humongous region).
 203 // Each of the array's elements denotes whether the corresponding region is in
 204 // the collection set or is a humongous region.
 205 // We use this to quickly reclaim humongous objects: by making a humongous region
 206 // succeed this test, we sort-of add it to the collection set, from which objects
 207 // are supposed to be evacuated. However, since the region is humongous, evacuation
 208 // will automatically fail the attempt to allocate it into a PLAB. We catch this
 209 // condition (in that slow path), and mark the region as "live" in a side table.
 210 // At the end of the GC, we use this information, among other things, to determine
 211 // whether we can reclaim the humongous object.
 212 class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<char> {
 213  private:
 214   enum {
 215    InNeither,           // neither in collection set nor humongous
 216    InCSet,              // region is in collection set only
 217    IsHumongous          // region is a humongous start region
 218   };
 219  protected:
 220   char default_value() const { return InNeither; }
 221  public:
 222   void set_humongous(uintptr_t index) { assert(get_by_index(index) != InCSet, "Should not overwrite InCSet values"); set_by_index(index, IsHumongous); }
 223   void clear_humongous(uintptr_t index) {
 224     set_by_index(index, InNeither);
 225   }
 226   void set_in_cset(uintptr_t index) { assert(get_by_index(index) != IsHumongous, "Should not overwrite IsHumongous value"); set_by_index(index, InCSet); }
 227 
 228   bool is_in_cset_or_humongous(HeapWord* addr) const { return get_by_address(addr) != InNeither; }
 229   bool is_in_cset_and_humongous(HeapWord* addr) const { return get_by_address(addr) == IsHumongous; }
 230   bool is_in_cset(HeapWord* addr) const { return get_by_address(addr) == InCSet; }
 231   void clear() { G1BiasedMappedArray<char>::clear(); }
 232 };
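
To make the comment above concrete, a hedged sketch of how a copy closure might consume the three states through the G1CollectedHeap queries declared later in this file (`g1h` and `obj` are hypothetical locals; the real logic is spread over the copy closures and G1ParScanThreadState):

    // Illustrative fragment, assuming the surrounding HotSpot types.
    if (g1h->is_in_cset(obj)) {
      // Regular collection set region: the object is expected to be evacuated.
    } else if (g1h->is_in_cset_and_humongous(obj)) {
      // Humongous start region registered for fast reclaim: it can never fit
      // into a PLAB, so instead of copying we only record that something still
      // references it (see set_humongous_is_live() / _humongous_is_live).
      g1h->set_humongous_is_live(obj);
    }
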
 233 
 234 class RefineCardTableEntryClosure;
 235 
 236 class G1CollectedHeap : public SharedHeap {
 237   friend class VM_CollectForMetadataAllocation;
 238   friend class VM_G1CollectForAllocation;
 239   friend class VM_G1CollectFull;
 240   friend class VM_G1IncCollectionPause;
 241   friend class VMStructs;
 242   friend class MutatorAllocRegion;
 243   friend class SurvivorGCAllocRegion;
 244   friend class OldGCAllocRegion;
 245 
 246   // Closures used in implementation.
 247   template <G1Barrier barrier, G1Mark do_mark_object>
 248   friend class G1ParCopyClosure;
 249   friend class G1IsAliveClosure;
 250   friend class G1EvacuateFollowersClosure;
 251   friend class G1ParScanThreadState;
 252   friend class G1ParScanClosureSuper;
 253   friend class G1ParEvacuateFollowersClosure;
 254   friend class G1ParTask;
 255   friend class G1FreeGarbageRegionClosure;
 256   friend class RefineCardTableEntryClosure;
 257   friend class G1PrepareCompactClosure;
 258   friend class RegionSorter;
 259   friend class RegionResetter;
 260   friend class CountRCClosure;
 261   friend class EvacPopObjClosure;
 262   friend class G1ParCleanupCTTask;
 263 
 264   friend class G1FreeHumongousRegionClosure;
 265   // Other related classes.
 266   friend class G1MarkSweep;
 267 
 268 private:
 269   // The one and only G1CollectedHeap, so static functions can find it.
 270   static G1CollectedHeap* _g1h;
 271 
 272   static size_t _humongous_object_threshold_in_words;
 273 
 274   // Storage for the G1 heap.
 275   VirtualSpace _g1_storage;
 276   MemRegion    _g1_reserved;
 277 
 278   // The part of _g1_storage that is currently committed.
 279   MemRegion _g1_committed;
 280 
 281   // The master free list. It will satisfy all new region allocations.
 282   FreeRegionList _free_list;
 283 
 284   // The secondary free list which contains regions that have been
 285   // freed up during the cleanup process. This will be appended to the
 286   // master free list when appropriate.
 287   FreeRegionList _secondary_free_list;
 288 
 289   // It keeps track of the old regions.
 290   HeapRegionSet _old_set;
 291 
 292   // It keeps track of the humongous regions.
 293   HeapRegionSet _humongous_set;
 294 
 295   void clear_humongous_is_live_table();
 296   void eagerly_reclaim_humongous_regions();
 297 
 298   // The number of regions we could create by expansion.
 299   uint _expansion_regions;
 300 
 301   // The block offset table for the G1 heap.
 302   G1BlockOffsetSharedArray* _bot_shared;
 303 
 304   // Tears down the region sets / lists so that they are empty and the
 305   // regions on the heap do not belong to a region set / list. The
 306   // only exception is the humongous set which we leave unaltered. If
 307   // free_list_only is true, it will only tear down the master free
 308   // list. It is called before a Full GC (free_list_only == false) or
 309   // before heap shrinking (free_list_only == true).
 310   void tear_down_region_sets(bool free_list_only);
 311 
 312   // Rebuilds the region sets / lists so that they are repopulated to
 313   // reflect the contents of the heap. The only exception is the
 314   // humongous set which was not torn down in the first place. If
 315   // free_list_only is true, it will only rebuild the master free
 316   // list. It is called after a Full GC (free_list_only == false) or
 317   // after heap shrinking (free_list_only == true).


 383 
 384   // It does any cleanup that needs to be done on the GC alloc regions
 385   // before a Full GC.
 386   void abandon_gc_alloc_regions();
 387 
 388   // Helper for monitoring and management support.
 389   G1MonitoringSupport* _g1mm;
 390 
 391   // Determines PLAB size for a particular allocation purpose.
 392   size_t desired_plab_sz(GCAllocPurpose purpose);
 393 
 394   // Outside of GC pauses, the number of bytes used in all regions other
 395   // than the current allocation region.
 396   size_t _summary_bytes_used;
 397 
 398   // This array is used for a quick test on whether a reference points into
 399   // the collection set or not. Each of the array's elements denotes whether the
 400   // corresponding region is in the collection set or not.
 401   G1FastCSetBiasedMappedArray _in_cset_fast_test;
 402 
 403   // Records whether the region at the given index is kept live by roots or
 404   // references from the young generation.
 405   class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
 406    protected:
 407     bool default_value() const { return false; }
 408    public:
 409     void clear() { G1BiasedMappedArray<bool>::clear(); }
 410     void set_live(uint region) {
 411       set_by_index(region, true);
 412     }
 413     bool is_live(uint region) {
 414       return get_by_index(region);
 415     }
 416   };
 417 
 418   HumongousIsLiveBiasedMappedArray _humongous_is_live;
 419   // Stores whether, during humongous object registration, we found candidate
 420   // regions. If not, we can skip a few steps.
 421   bool _has_humongous_reclaim_candidates;
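
A minimal sketch of how this side table and the flag above are meant to interact over a pause; `index` is a hypothetical region index, and the real updates happen in set_humongous_is_live() and the reclaim helpers declared elsewhere in this class:

    // Hypothetical fragment, assuming the surrounding HotSpot types.
    _humongous_is_live.clear();                   // no candidate proven live yet
    _has_humongous_reclaim_candidates = true;     // registration found candidates

    // During evacuation, any reference into the humongous region keeps it alive:
    _humongous_is_live.set_live(index);

    // After evacuation, candidates that were never marked live can be freed:
    if (!_humongous_is_live.is_live(index)) {
      // eagerly reclaim the regions backing this humongous object
    }
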
 422 
 423   volatile unsigned _gc_time_stamp;
 424 
 425   size_t* _surviving_young_words;
 426 
 427   G1HRPrinter _hr_printer;
 428 
 429   void setup_surviving_young_words();
 430   void update_surviving_young_words(size_t* surv_young_words);
 431   void cleanup_surviving_young_words();
 432 
 433   // It decides whether an explicit GC should start a concurrent cycle
 434   // instead of doing a STW GC. Currently, a concurrent cycle is
 435   // explicitly started if:
 436   // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
 437   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
 438   // (c) cause == _g1_humongous_allocation
 439   bool should_do_concurrent_full_gc(GCCause::Cause cause);
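
A sketch that restates conditions (a)-(c) above in code form; the actual body lives in g1CollectedHeap.cpp:

    bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
      switch (cause) {
        case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
        case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
        case GCCause::_g1_humongous_allocation: return true;
        default:                                return false;
      }
    }
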
 440 
 441   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 442   // concurrent cycles) we have started.


 721   // after processing.
 722   void enqueue_discovered_references(uint no_of_gc_workers);
 723 
 724 public:
 725 
 726   G1MonitoringSupport* g1mm() {
 727     assert(_g1mm != NULL, "should have been initialized");
 728     return _g1mm;
 729   }
 730 
 731   // Expand the garbage-first heap by at least the given size (in bytes!).
 732   // Returns true if the heap was expanded by the requested amount;
 733   // false otherwise.
 734   // (Rounds up to a HeapRegion boundary.)
 735   bool expand(size_t expand_bytes);
 736 
 737   // Do anything common to GC's.
 738   virtual void gc_prologue(bool full);
 739   virtual void gc_epilogue(bool full);
 740 
 741   inline void set_humongous_is_live(oop obj);
 742 
 743   bool humongous_is_live(uint region) {
 744     return _humongous_is_live.is_live(region);
 745   }
 746 
 747   // Returns whether the given region (which must be a humongous (start) region)
 748   // is to be considered conservatively live regardless of any other conditions.
 749   bool humongous_region_is_always_live(HeapRegion* region);
 750   // Register the given region to be part of the collection set.
 751   inline void register_humongous_region_with_in_cset_fast_test(uint index);
 752   // Register regions containing humongous objects (actually only their start
 753   // regions) in the in_cset_fast_test table.
 754   void register_humongous_regions_with_in_cset_fast_test();
 755   // We register a region with the fast "in collection set" test. We
 756   // simply set to true the array slot corresponding to this region.
 757   void register_region_with_in_cset_fast_test(HeapRegion* r) {
 758     _in_cset_fast_test.set_in_cset(r->hrs_index());
 759   }
 760 
 761   // This is a fast test on whether a reference points into the
 762   // collection set or not. Assume that the reference
 763   // points into the heap.
 764   inline bool in_cset_fast_test(oop obj);
 765 
 766   void clear_cset_fast_test() {
 767     _in_cset_fast_test.clear();
 768   }
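
Putting the helpers above together, a hedged sketch of the order in which an evacuation pause might use them (`region` is a hypothetical local and this is only a driver fragment; the real sequencing is in g1CollectedHeap.cpp):

    // Hypothetical fragment, assuming the surrounding HotSpot types.
    clear_cset_fast_test();                               // start from a clean table
    // ... select the collection set ...
    register_region_with_in_cset_fast_test(region);       // for each chosen region
    register_humongous_regions_with_in_cset_fast_test();  // add reclaim candidates
    // ... evacuate; the copy closures consult in_cset_fast_test(obj) ...
    eagerly_reclaim_humongous_regions();                  // free candidates never marked live
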
 769 
 770   // This is called at the start of either a concurrent cycle or a Full
 771   // GC to update the number of old marking cycles started.
 772   void increment_old_marking_cycles_started();
 773 
 774   // This is called at the end of either a concurrent cycle or a Full
 775   // GC to update the number of old marking cycles completed. Those two
 776   // can happen in a nested fashion, i.e., we start a concurrent
 777   // cycle, a Full GC happens half-way through it which ends first,
 778   // and then the cycle notices that a Full GC happened and ends


1328 
1329   // Perform a collection of the heap; intended for use in implementing
1330   // "System.gc".  This probably implies as full a collection as the
1331   // "CollectedHeap" supports.
1332   virtual void collect(GCCause::Cause cause);
1333 
1334   // The same as above but assume that the caller holds the Heap_lock.
1335   void collect_locked(GCCause::Cause cause);
1336 
1337   // True iff an evacuation has failed in the most-recent collection.
1338   bool evacuation_failed() { return _evacuation_failed; }
1339 
1340   void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, const HeapRegionSetCount& humongous_regions_removed);
1341   void prepend_to_freelist(FreeRegionList* list);
1342   void decrement_summary_bytes(size_t bytes);
1343 
1344   // Returns "TRUE" iff "p" points into the committed areas of the heap.
1345   virtual bool is_in(const void* p) const;
1346 
1347   // Return "TRUE" iff the given object address is within the collection
1348   // set. Slow implementation.
1349   inline bool obj_in_cs(oop obj);
1350 
1351   inline bool is_in_cset(oop obj);
1352 
1353   inline bool is_in_cset_or_humongous(const oop obj);
1354 
1355   inline bool is_in_cset_and_humongous(const oop obj);
1356 
1357   // Return "TRUE" iff the given object address is in the reserved
1358   // region of g1.
1359   bool is_in_g1_reserved(const void* p) const {
1360     return _g1_reserved.contains(p);
1361   }
1362 
1363   // Returns a MemRegion that corresponds to the space that has been
1364   // reserved for the heap
1365   MemRegion g1_reserved() {
1366     return _g1_reserved;
1367   }
1368 
1369   // Returns a MemRegion that corresponds to the space that has been
1370   // committed in the heap
1371   MemRegion g1_committed() {
1372     return _g1_committed;
1373   }
1374 
1375   virtual bool is_in_closed_subset(const void* p) const;
1376 


1391   // Same as above, restricted to a memory region.
1392   void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
1393 
1394   // Iterate over all objects, calling "cl.do_object" on each.
1395   virtual void object_iterate(ObjectClosure* cl);
1396 
1397   virtual void safe_object_iterate(ObjectClosure* cl) {
1398     object_iterate(cl);
1399   }
1400 
1401   // Iterate over all spaces in use in the heap, in ascending address order.
1402   virtual void space_iterate(SpaceClosure* cl);
1403 
1404   // Iterate over heap regions, in address order, terminating the
1405   // iteration early if the "doHeapRegion" method returns "true".
1406   void heap_region_iterate(HeapRegionClosure* blk) const;
1407 
1408   // Return the region with the given index. It assumes the index is valid.
1409   inline HeapRegion* region_at(uint index) const;
1410 
1411   // Calculate the region index of the given address. Given address must be
1412   // within the heap.
1413   inline uint addr_to_region(HeapWord* addr) const;
1414 
1415   // Divide the heap region sequence into "chunks" of some size (the number
1416   // of regions divided by the number of parallel threads times some
1417   // overpartition factor, currently 4).  Assumes that this will be called
1418   // in parallel by ParallelGCThreads worker threads with distinct worker
1419   // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1420   // calls will use the same "claim_value", and that that claim value is
1421   // different from the claim_value of any heap region before the start of
1422   // the iteration.  Applies "blk->doHeapRegion" to each of the regions, by
1423   // attempting to claim the first region in each chunk, and, if
1424   // successful, applying the closure to each region in the chunk (and
1425   // setting the claim value of the second and subsequent regions of the
1426   // chunk.)  For now requires that "doHeapRegion" always returns "false",
1427   // i.e., that a closure never attempt to abort a traversal.
1428   void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1429                                        uint worker,
1430                                        uint no_of_par_workers,
1431                                        jint claim_value);
1432 
1433   // It resets all the region claim values to the default.
1434   void reset_heap_region_claim_values();
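
To make the claim-value protocol above concrete, a hedged sketch of a worker-side caller; the closure, `worker_id`, `n_workers` and `SomeClaimValue` are hypothetical, and only the two methods above come from this header:

    // Hypothetical closure: counts non-empty regions, never aborts the traversal.
    class CountNonEmptyRegionsClosure : public HeapRegionClosure {
    public:
      size_t _count;
      CountNonEmptyRegionsClosure() : _count(0) { }
      bool doHeapRegion(HeapRegion* r) {
        if (!r->is_empty()) { _count++; }
        return false;   // "doHeapRegion" must always return false here
      }
    };

    // Each of the n_workers threads runs this with its own worker id but the
    // same, previously unused, claim value:
    CountNonEmptyRegionsClosure cl;
    g1h->heap_region_par_iterate_chunked(&cl, worker_id, n_workers, SomeClaimValue);
    // Once all workers are done, one thread resets the claim values:
    g1h->reset_heap_region_claim_values();
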