src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

rev 3463 : 7114678: G1: various small fixes, code cleanup, and refactoring
Summary: Various cleanups as a prelude to introducing iterators for HeapRegions.
Reviewed-by: johnc
Contributed-by: tonyp


 358   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
 359   // (c) cause == _g1_humongous_allocation
 360   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 361 
 362   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 363   // concurrent cycles) we have started.
 364   volatile unsigned int _old_marking_cycles_started;
 365 
 366   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 367   // concurrent cycles) we have completed.
 368   volatile unsigned int _old_marking_cycles_completed;
 369 
 370   // This is a non-product method that is helpful for testing. It is
 371   // called at the end of a GC and artificially expands the heap by
 372   // allocating a number of dead regions. This way we can induce very
 373   // frequent marking cycles and stress the cleanup / concurrent
 374   // cleanup code more (as all the regions that will be allocated by
 375   // this method will be found dead by the marking cycle).
 376   void allocate_dummy_regions() PRODUCT_RETURN;
 377 







 378   // These are macros so that, if the assert fires, we get the correct
 379   // line number, file, etc.
 380 
 381 #define heap_locking_asserts_err_msg(_extra_message_)                         \
 382   err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",    \
 383           (_extra_message_),                                                  \
 384           BOOL_TO_STR(Heap_lock->owned_by_self()),                            \
 385           BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),               \
 386           BOOL_TO_STR(Thread::current()->is_VM_thread()))
 387 
 388 #define assert_heap_locked()                                                  \
 389   do {                                                                        \
 390     assert(Heap_lock->owned_by_self(),                                        \
 391            heap_locking_asserts_err_msg("should be holding the Heap_lock"));  \
 392   } while (0)
 393 
 394 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
 395   do {                                                                        \
 396     assert(Heap_lock->owned_by_self() ||                                      \
 397            (SafepointSynchronize::is_at_safepoint() &&                        \


1044   HeapRegionRemSetIterator* rem_set_iterator(int i) {
1045     return _rem_set_iterator[i];
1046   }
1047 
1048   HeapRegionRemSetIterator* rem_set_iterator() {
1049     return _rem_set_iterator[0];
1050   }
1051 
1052   unsigned get_gc_time_stamp() {
1053     return _gc_time_stamp;
1054   }
1055 
1056   void reset_gc_time_stamp() {
1057     _gc_time_stamp = 0;
1058     OrderAccess::fence();
1059     // Clear the cached CSet starting regions and time stamps.
1060     // Their validity is dependent on the GC timestamp.
1061     clear_cset_start_regions();
1062   }
1063 


1064   void increment_gc_time_stamp() {
1065     ++_gc_time_stamp;
1066     OrderAccess::fence();
1067   }
1068 





1069   void iterate_dirty_card_closure(CardTableEntryClosure* cl,
1070                                   DirtyCardQueue* into_cset_dcq,
1071                                   bool concurrent, int worker_i);
1072 
1073   // The shared block offset table array.
1074   G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
1075 
1076   // Reference Processing accessors
1077 
1078   // The STW reference processor....
1079   ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
1080 
1081   // The Concurrent Marking reference processor...
1082   ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
1083 
1084   virtual size_t capacity() const;
1085   virtual size_t used() const;
1086   // This should be called when we're not holding the heap lock. The
1087   // result might be a bit inaccurate.
1088   size_t used_unlocked() const;


1285   virtual void object_iterate(ObjectClosure* cl) {
1286     object_iterate(cl, true);
1287   }
1288   virtual void safe_object_iterate(ObjectClosure* cl) {
1289     object_iterate(cl, true);
1290   }
1291   void object_iterate(ObjectClosure* cl, bool do_perm);
1292 
1293   // Iterate over all objects allocated since the last collection, calling
1294   // "cl.do_object" on each.  The heap must have been initialized properly
1295   // to support this function, or else this call will fail.
1296   virtual void object_iterate_since_last_GC(ObjectClosure* cl);
1297 
1298   // Iterate over all spaces in use in the heap, in ascending address order.
1299   virtual void space_iterate(SpaceClosure* cl);
1300 
1301   // Iterate over heap regions, in address order, terminating the
1302   // iteration early if the "doHeapRegion" method returns "true".
1303   void heap_region_iterate(HeapRegionClosure* blk) const;
1304 
1305   // Iterate over heap regions starting with r (or the first region if "r"
1306   // is NULL), in address order, terminating early if the "doHeapRegion"
1307   // method returns "true".
1308   void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
1309 
1310   // Return the region with the given index. It assumes the index is valid.
1311   HeapRegion* region_at(uint index) const { return _hrs.at(index); }
1312 
1313   // Divide the heap region sequence into "chunks" of some size (the number
1314   // of regions divided by the number of parallel threads times some
1315   // overpartition factor, currently 4).  Assumes that this will be called
1316   // in parallel by ParallelGCThreads worker threads with distinct worker
1317   // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1318   // calls will use the same "claim_value", and that that claim value is
1319   // different from the claim_value of any heap region before the start of
1320   // the iteration.  Applies "blk->doHeapRegion" to each of the regions, by
1321   // attempting to claim the first region in each chunk, and, if
1322   // successful, applying the closure to each region in the chunk (and
1323   // setting the claim value of the second and subsequent regions of the
1324   // chunk.)  For now requires that "doHeapRegion" always returns "false",
1325   // i.e., that a closure never attempts to abort a traversal.
1326   void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1327                                        uint worker,
1328                                        uint no_of_par_workers,
1329                                        jint claim_value);


1334   // Resets the claim values of regions in the current
1335   // collection set to the default.
1336   void reset_cset_heap_region_claim_values();
1337 
1338 #ifdef ASSERT
1339   bool check_heap_region_claim_values(jint claim_value);
1340 
1341   // Same as the routine above but only checks regions in the
1342   // current collection set.
1343   bool check_cset_heap_region_claim_values(jint claim_value);
1344 #endif // ASSERT
1345 
1346   // Clear the cached cset start regions and (more importantly)
1347   // the time stamps. Called when we reset the GC time stamp.
1348   void clear_cset_start_regions();
1349 
1350   // Given the id of a worker, obtain or calculate a suitable
1351   // starting region for iterating over the current collection set.
1352   HeapRegion* start_cset_region_for_worker(int worker_i);
1353 





1354   // Iterate over the regions (if any) in the current collection set.
1355   void collection_set_iterate(HeapRegionClosure* blk);
1356 
1357   // As above but starting from region r
1358   void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1359 
1360   // Returns the first (lowest address) compactible space in the heap.
1361   virtual CompactibleSpace* first_compactible_space();
1362 
1363   // A CollectedHeap will contain some number of spaces.  This finds the
1364   // space containing a given address, or else returns NULL.
1365   virtual Space* space_containing(const void* addr) const;
1366 
1367   // A G1CollectedHeap will contain some number of heap regions.  This
1368   // finds the region containing a given address, or else returns NULL.
1369   template <class T>
1370   inline HeapRegion* heap_region_containing(const T addr) const;
1371 
1372   // Like the above, but requires "addr" to be in the heap (to avoid a
1373   // null-check), and unlike the above, may return a continuing humongous


1541   bool check_young_list_well_formed() {
1542     return _young_list->check_list_well_formed();
1543   }
1544 
1545   bool check_young_list_empty(bool check_heap,
1546                               bool check_sample = true);
1547 
1548   // *** Stuff related to concurrent marking.  It's not clear to me that so
1549   // many of these need to be public.
1550 
1551   // The functions below are helper functions that a subclass of
1552   // "CollectedHeap" can use in the implementation of its virtual
1553   // functions.
1554   // This performs a concurrent marking of the live objects in a
1555   // bitmap off to the side.
1556   void doConcurrentMark();
1557 
1558   bool isMarkedPrev(oop obj) const;
1559   bool isMarkedNext(oop obj) const;
1560 
1561   // vo == UsePrevMarking -> use "prev" marking information,
1562   // vo == UseNextMarking -> use "next" marking information,
1563   // vo == UseMarkWord    -> use mark word from object header
1564   bool is_obj_dead_cond(const oop obj,
1565                         const HeapRegion* hr,
1566                         const VerifyOption vo) const {
1567 
1568     switch (vo) {
1569       case VerifyOption_G1UsePrevMarking:
1570         return is_obj_dead(obj, hr);
1571       case VerifyOption_G1UseNextMarking:
1572         return is_obj_ill(obj, hr);
1573       default:
1574         assert(vo == VerifyOption_G1UseMarkWord, "must be");
1575         return !obj->is_gc_marked();
1576     }
1577   }
1578 
1579   // Determine if an object is dead, given the object and also
1580   // the region to which the object belongs. An object is dead
1581   // iff a) it was not allocated since the last mark and b) it
1582   // is not marked.
1583 
1584   bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1585     return
1586       !hr->obj_allocated_since_prev_marking(obj) &&
1587       !isMarkedPrev(obj);
1588   }
1589 
1590   // This is used when copying an object to survivor space.
1591   // If the object is marked live, then we mark the copy live.
1592   // If the object is allocated since the start of this mark
1593   // cycle, then we mark the copy live.
1594   // If the object has been around since the previous mark
1595   // phase, and hasn't been marked yet during this phase,
1596   // then we don't mark it, we just wait for the
1597   // current marking cycle to get to it.
1598 
1599   // This function returns true when an object has been
1600   // around since the previous marking and hasn't yet
1601   // been marked during this marking.
1602 
1603   bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1604     return
1605       !hr->obj_allocated_since_next_marking(obj) &&
1606       !isMarkedNext(obj);
1607   }
1608 
1609   // Determine if an object is dead, given only the object itself.
1610   // This will find the region to which the object belongs and
1611   // then call the region version of the same function.
1612 
1613   // Added: if it is in the permanent gen it isn't dead.
1614   // Added: if it is NULL it isn't dead.
1615 
1616   // vo == UsePrevMarking -> use "prev" marking information,
1617   // vo == UseNextMarking -> use "next" marking information,
1618   // vo == UseMarkWord    -> use mark word from object header
1619   bool is_obj_dead_cond(const oop obj,
1620                         const VerifyOption vo) const {
1621 
1622     switch (vo) {
1623       case VerifyOption_G1UsePrevMarking:
1624         return is_obj_dead(obj);
1625       case VerifyOption_G1UseNextMarking:
1626         return is_obj_ill(obj);
1627       default:
1628         assert(vo == VerifyOption_G1UseMarkWord, "must be");
1629         return !obj->is_gc_marked();
1630     }
1631   }
1632 
1633   bool is_obj_dead(const oop obj) const {
1634     const HeapRegion* hr = heap_region_containing(obj);
1635     if (hr == NULL) {
1636       if (Universe::heap()->is_in_permanent(obj))
1637         return false;
1638       else if (obj == NULL) return false;
1639       else return true;
1640     }
1641     else return is_obj_dead(obj, hr);
1642   }
1643 
1644   bool is_obj_ill(const oop obj) const {
1645     const HeapRegion* hr = heap_region_containing(obj);
1646     if (hr == NULL) {
1647       if (Universe::heap()->is_in_permanent(obj))
1648         return false;
1649       else if (obj == NULL) return false;
1650       else return true;
1651     }
1652     else return is_obj_ill(obj, hr);
1653   }
1654 




































1655   // The following is just to alert the verification code
1656   // that a full collection has occurred and that the
1657   // remembered sets are no longer up to date.
1658   bool _full_collection;
1659   void set_full_collection() { _full_collection = true;}
1660   void clear_full_collection() {_full_collection = false;}
1661   bool full_collection() {return _full_collection;}
1662 
1663   ConcurrentMark* concurrent_mark() const { return _cm; }
1664   ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1665 
1666   // The dirty cards region list is used to record a subset of regions
1667   // whose cards need clearing. The list is populated during the
1668   // remembered set scanning and drained during the card table
1669   // cleanup. Although the methods are reentrant, population/draining
1670   // phases must not overlap. For synchronization purposes the last
1671   // element on the list points to itself.
1672   HeapRegion* _dirty_cards_region_list;
1673   void push_dirty_cards_region(HeapRegion* hr);
1674   HeapRegion* pop_dirty_cards_region();




 358   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
 359   // (c) cause == _g1_humongous_allocation
 360   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 361 
 362   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 363   // concurrent cycles) we have started.
 364   volatile unsigned int _old_marking_cycles_started;
 365 
 366   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 367   // concurrent cycles) we have completed.
 368   volatile unsigned int _old_marking_cycles_completed;
 369 
 370   // This is a non-product method that is helpful for testing. It is
 371   // called at the end of a GC and artificially expands the heap by
 372   // allocating a number of dead regions. This way we can induce very
 373   // frequent marking cycles and stress the cleanup / concurrent
 374   // cleanup code more (as all the regions that will be allocated by
 375   // this method will be found dead by the marking cycle).
 376   void allocate_dummy_regions() PRODUCT_RETURN;
 377 
 378   // Clear RSets after a compaction. It also resets the GC time stamps.
 379   void clear_rsets_post_compaction();
 380 
 381   // If the HR printer is active, dump the state of the regions in the
 382   // heap after a compaction.
 383   void print_hrs_post_compaction();
 384 
 385   // These are macros so that, if the assert fires, we get the correct
 386   // line number, file, etc.
 387 
 388 #define heap_locking_asserts_err_msg(_extra_message_)                         \
 389   err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",    \
 390           (_extra_message_),                                                  \
 391           BOOL_TO_STR(Heap_lock->owned_by_self()),                            \
 392           BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),               \
 393           BOOL_TO_STR(Thread::current()->is_VM_thread()))
 394 
 395 #define assert_heap_locked()                                                  \
 396   do {                                                                        \
 397     assert(Heap_lock->owned_by_self(),                                        \
 398            heap_locking_asserts_err_msg("should be holding the Heap_lock"));  \
 399   } while (0)
 400 
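Aside: the comment above explains why these checks are macros rather than helper functions, namely that the assert should report the caller's file and line. Below is a minimal standalone sketch of the same idiom; my_assert is a hypothetical stand-in, not HotSpot code. The do { } while (0) wrapper makes the macro behave like a single statement, so it composes safely with an unbraced if.

#include <cstdio>
#include <cstdlib>

#define my_assert(cond, msg)                                           \
  do {                                                                 \
    if (!(cond)) {                                                     \
      /* Because the macro expands at the call site, __FILE__ and      \
         __LINE__ name the caller's location, not a helper's. */       \
      fprintf(stderr, "assert failed: %s (%s) at %s:%d\n",             \
              (msg), #cond, __FILE__, __LINE__);                       \
      abort();                                                         \
    }                                                                  \
  } while (0)

int main() {
  bool heap_locked = true;  // pretend the Heap_lock is held
  if (heap_locked)
    my_assert(heap_locked, "should be holding the Heap_lock");  // expands safely
  return 0;
}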
 401 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
 402   do {                                                                        \
 403     assert(Heap_lock->owned_by_self() ||                                      \
 404            (SafepointSynchronize::is_at_safepoint() &&                        \


1051   HeapRegionRemSetIterator* rem_set_iterator(int i) {
1052     return _rem_set_iterator[i];
1053   }
1054 
1055   HeapRegionRemSetIterator* rem_set_iterator() {
1056     return _rem_set_iterator[0];
1057   }
1058 
1059   unsigned get_gc_time_stamp() {
1060     return _gc_time_stamp;
1061   }
1062 
1063   void reset_gc_time_stamp() {
1064     _gc_time_stamp = 0;
1065     OrderAccess::fence();
1066     // Clear the cached CSet starting regions and time stamps.
1067     // Their validity is dependent on the GC timestamp.
1068     clear_cset_start_regions();
1069   }
1070 
1071   void check_gc_time_stamps() PRODUCT_RETURN;
1072 
1073   void increment_gc_time_stamp() {
1074     ++_gc_time_stamp;
1075     OrderAccess::fence();
1076   }
1077 
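Aside: reset_gc_time_stamp() and increment_gc_time_stamp() support a timestamp-validated caching scheme, where cached CSet start regions are trusted only if they were recorded under the current GC time stamp. A minimal standalone sketch of that idea follows, assuming a single mutator thread; TimestampedCache and expensive_compute are illustrative names, not the HotSpot types.

#include <atomic>
#include <cstdio>

struct TimestampedCache {
  std::atomic<unsigned> gc_time_stamp{0};
  unsigned cached_stamp = 0xFFFFFFFFu;  // sentinel: nothing cached yet
  int      cached_value = 0;

  int expensive_compute() { return 42; }  // stand-in for the real work

  int get_or_compute() {
    unsigned now = gc_time_stamp.load(std::memory_order_acquire);
    if (cached_stamp != now) {            // stale or empty: recompute
      cached_value = expensive_compute();
      cached_stamp = now;
    }
    return cached_value;
  }

  void increment_gc_time_stamp() {
    // Bumping the stamp implicitly invalidates every cached entry.
    gc_time_stamp.fetch_add(1, std::memory_order_release);
  }

  void reset_gc_time_stamp() {
    gc_time_stamp.store(0, std::memory_order_release);
    // Going back to 0 could make a stale cached stamp of 0 look current
    // again, so the cache is cleared explicitly; this mirrors why the
    // header's reset_gc_time_stamp() calls clear_cset_start_regions().
    cached_stamp = 0xFFFFFFFFu;
  }
};

int main() {
  TimestampedCache c;
  printf("%d\n", c.get_or_compute());   // computes and caches
  c.increment_gc_time_stamp();
  printf("%d\n", c.get_or_compute());   // recomputes under the new stamp
  c.reset_gc_time_stamp();
  printf("%d\n", c.get_or_compute());   // recomputes after the reset
  return 0;
}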
1078   // Reset the given region's GC timestamp. If it is a starts humongous
1079   // region, also reset the GC timestamps of its corresponding
1080   // continues humongous regions.
1081   void reset_gc_time_stamps(HeapRegion* hr);
1082 
1083   void iterate_dirty_card_closure(CardTableEntryClosure* cl,
1084                                   DirtyCardQueue* into_cset_dcq,
1085                                   bool concurrent, int worker_i);
1086 
1087   // The shared block offset table array.
1088   G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
1089 
1090   // Reference Processing accessors
1091 
1092   // The STW reference processor....
1093   ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
1094 
1095   // The Concurrent Marking reference processor...
1096   ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
1097 
1098   virtual size_t capacity() const;
1099   virtual size_t used() const;
1100   // This should be called when we're not holding the heap lock. The
1101   // result might be a bit inaccurate.
1102   size_t used_unlocked() const;


1299   virtual void object_iterate(ObjectClosure* cl) {
1300     object_iterate(cl, true);
1301   }
1302   virtual void safe_object_iterate(ObjectClosure* cl) {
1303     object_iterate(cl, true);
1304   }
1305   void object_iterate(ObjectClosure* cl, bool do_perm);
1306 
1307   // Iterate over all objects allocated since the last collection, calling
1308   // "cl.do_object" on each.  The heap must have been initialized properly
1309   // to support this function, or else this call will fail.
1310   virtual void object_iterate_since_last_GC(ObjectClosure* cl);
1311 
1312   // Iterate over all spaces in use in the heap, in ascending address order.
1313   virtual void space_iterate(SpaceClosure* cl);
1314 
1315   // Iterate over heap regions, in address order, terminating the
1316   // iteration early if the "doHeapRegion" method returns "true".
1317   void heap_region_iterate(HeapRegionClosure* blk) const;
1318 





1319   // Return the region with the given index. It assumes the index is valid.
1320   HeapRegion* region_at(uint index) const { return _hrs.at(index); }
1321 
1322   // Divide the heap region sequence into "chunks" of some size (the number
1323   // of regions divided by the number of parallel threads times some
1324   // overpartition factor, currently 4).  Assumes that this will be called
1325   // in parallel by ParallelGCThreads worker threads with distinct worker
1326   // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1327   // calls will use the same "claim_value", and that that claim value is
1328   // different from the claim_value of any heap region before the start of
1329   // the iteration.  Applies "blk->doHeapRegion" to each of the regions, by
1330   // attempting to claim the first region in each chunk, and, if
1331   // successful, applying the closure to each region in the chunk (and
1332   // setting the claim value of the second and subsequent regions of the
1333   // chunk.)  For now requires that "doHeapRegion" always returns "false",
1334   // i.e., that a closure never attempts to abort a traversal.
1335   void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1336                                        uint worker,
1337                                        uint no_of_par_workers,
1338                                        jint claim_value);
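Aside: the long comment above describes claim-value based chunked iteration. The standalone sketch below illustrates that scheme under simplifying assumptions; Region, par_iterate_chunked and the chunking arithmetic are stand-ins, not the HotSpot API. Workers race to CAS the claim value of a chunk's first region, and the winner processes the whole chunk.

#include <atomic>
#include <cstdio>
#include <functional>
#include <thread>
#include <vector>

struct Region {
  std::atomic<int> claim{0};  // 0 == unclaimed
  int index = 0;
};

static void par_iterate_chunked(std::vector<Region>& regions,
                                unsigned worker, unsigned n_workers,
                                int claim_value) {
  size_t n = regions.size();
  // Over-partition: roughly four chunks per worker, as the comment above
  // describes. (This toy version assumes the chunk size divides n evenly.)
  size_t chunk = n / (n_workers * 4);
  if (chunk == 0) chunk = 1;
  // Start each worker at a different chunk so they do not all race for
  // the same claims first.
  size_t start = (worker * chunk) % n;
  for (size_t visited = 0; visited < n; visited += chunk) {
    size_t first = (start + visited) % n;
    int expected = 0;
    // Try to claim the chunk by CAS-ing the claim value of its first region.
    if (regions[first].claim.compare_exchange_strong(expected, claim_value)) {
      // We won this chunk: process every region in it, setting the claim
      // value of the second and subsequent regions as we go.
      for (size_t i = first; i < first + chunk && i < n; i++) {
        if (i != first) regions[i].claim.store(claim_value);
        printf("worker %u processed region %d\n", worker, regions[i].index);
      }
    }
  }
}

int main() {
  std::vector<Region> regions(32);
  for (int i = 0; i < 32; i++) regions[i].index = i;
  std::vector<std::thread> workers;
  for (unsigned w = 0; w < 4; w++) {
    workers.emplace_back(par_iterate_chunked, std::ref(regions), w, 4u, 1);
  }
  for (auto& t : workers) t.join();
  return 0;
}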


1343   // Resets the claim values of regions in the current
1344   // collection set to the default.
1345   void reset_cset_heap_region_claim_values();
1346 
1347 #ifdef ASSERT
1348   bool check_heap_region_claim_values(jint claim_value);
1349 
1350   // Same as the routine above but only checks regions in the
1351   // current collection set.
1352   bool check_cset_heap_region_claim_values(jint claim_value);
1353 #endif // ASSERT
1354 
1355   // Clear the cached cset start regions and (more importantly)
1356   // the time stamps. Called when we reset the GC time stamp.
1357   void clear_cset_start_regions();
1358 
1359   // Given the id of a worker, obtain or calculate a suitable
1360   // starting region for iterating over the current collection set.
1361   HeapRegion* start_cset_region_for_worker(int worker_i);
1362 
1363   // This is a convenience method that is used by the
1364   // HeapRegionIterator classes to calculate the starting region for
1365   // each worker so that they do not all start from the same region.
1366   HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers);
1367 
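Aside: one simple way to spread worker start positions evenly over the region index space is sketched below; start_index_for_worker and its formula are illustrative assumptions, not necessarily what start_region_for_worker() or start_cset_region_for_worker() actually compute.

#include <cstdio>

static unsigned start_index_for_worker(unsigned worker_i,
                                       unsigned n_workers,
                                       unsigned n_regions) {
  // Evenly spaced offsets: worker 0 starts at region 0, worker k at
  // roughly k * n_regions / n_workers, so workers do not all begin at
  // the same region and contend on the same claims.
  return (unsigned)(((unsigned long long)worker_i * n_regions) / n_workers);
}

int main() {
  for (unsigned w = 0; w < 4; w++) {
    printf("worker %u starts at region %u of 1000\n",
           w, start_index_for_worker(w, 4, 1000));
  }
  return 0;
}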
1368   // Iterate over the regions (if any) in the current collection set.
1369   void collection_set_iterate(HeapRegionClosure* blk);
1370 
1371   // As above but starting from region r
1372   void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1373 
1374   // Returns the first (lowest address) compactible space in the heap.
1375   virtual CompactibleSpace* first_compactible_space();
1376 
1377   // A CollectedHeap will contain some number of spaces.  This finds the
1378   // space containing a given address, or else returns NULL.
1379   virtual Space* space_containing(const void* addr) const;
1380 
1381   // A G1CollectedHeap will contain some number of heap regions.  This
1382   // finds the region containing a given address, or else returns NULL.
1383   template <class T>
1384   inline HeapRegion* heap_region_containing(const T addr) const;
1385 
1386   // Like the above, but requires "addr" to be in the heap (to avoid a
1387   // null-check), and unlike the above, may return a continuing humongous


1555   bool check_young_list_well_formed() {
1556     return _young_list->check_list_well_formed();
1557   }
1558 
1559   bool check_young_list_empty(bool check_heap,
1560                               bool check_sample = true);
1561 
1562   // *** Stuff related to concurrent marking.  It's not clear to me that so
1563   // many of these need to be public.
1564 
1565   // The functions below are helper functions that a subclass of
1566   // "CollectedHeap" can use in the implementation of its virtual
1567   // functions.
1568   // This performs a concurrent marking of the live objects in a
1569   // bitmap off to the side.
1570   void doConcurrentMark();
1571 
1572   bool isMarkedPrev(oop obj) const;
1573   bool isMarkedNext(oop obj) const;
1574 


















1575   // Determine if an object is dead, given the object and also
1576   // the region to which the object belongs. An object is dead
1577   // iff a) it was not allocated since the last mark and b) it
1578   // is not marked.
1579 
1580   bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1581     return
1582       !hr->obj_allocated_since_prev_marking(obj) &&
1583       !isMarkedPrev(obj);
1584   }
1585 
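Aside: a standalone sketch of the dead-object predicate described above, using a toy region with a top-at-mark-start boundary and a set standing in for the "prev" marking bitmap; ToyRegion and friends are simplified stand-ins, not the HotSpot data structures.

#include <cstdio>
#include <set>

struct ToyRegion {
  unsigned prev_top_at_mark_start;  // objects at or above this address were
                                    // allocated after the previous marking started
  std::set<unsigned> prev_bitmap;   // objects marked live by that marking

  bool obj_allocated_since_prev_marking(unsigned addr) const {
    return addr >= prev_top_at_mark_start;
  }
};

static bool is_marked_prev(const ToyRegion& r, unsigned addr) {
  return r.prev_bitmap.count(addr) != 0;
}

// Mirrors the predicate above: dead iff (a) not allocated since the last
// marking and (b) not marked by it.
static bool is_obj_dead(const ToyRegion& r, unsigned addr) {
  return !r.obj_allocated_since_prev_marking(addr) && !is_marked_prev(r, addr);
}

int main() {
  ToyRegion r{100, {10, 40}};
  printf("%d\n", is_obj_dead(r, 10));   // 0: marked, so live
  printf("%d\n", is_obj_dead(r, 50));   // 1: old and unmarked, so dead
  printf("%d\n", is_obj_dead(r, 150));  // 0: allocated since marking, so live
  return 0;
}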









1586   // This function returns true when an object has been
1587   // around since the previous marking and hasn't yet
1588   // been marked during this marking.
1589 
1590   bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1591     return
1592       !hr->obj_allocated_since_next_marking(obj) &&
1593       !isMarkedNext(obj);
1594   }
1595 
1596   // Determine if an object is dead, given only the object itself.
1597   // This will find the region to which the object belongs and
1598   // then call the region version of the same function.
1599 
1600   // Added: if it is in the permanent gen it isn't dead.
1601   // Added: if it is NULL it isn't dead.
1602 

















1603   bool is_obj_dead(const oop obj) const {
1604     const HeapRegion* hr = heap_region_containing(obj);
1605     if (hr == NULL) {
1606       if (Universe::heap()->is_in_permanent(obj))
1607         return false;
1608       else if (obj == NULL) return false;
1609       else return true;
1610     }
1611     else return is_obj_dead(obj, hr);
1612   }
1613 
1614   bool is_obj_ill(const oop obj) const {
1615     const HeapRegion* hr = heap_region_containing(obj);
1616     if (hr == NULL) {
1617       if (Universe::heap()->is_in_permanent(obj))
1618         return false;
1619       else if (obj == NULL) return false;
1620       else return true;
1621     }
1622     else return is_obj_ill(obj, hr);
1623   }
1624 
1625   // The methods below are here for convenience and dispatch the
1626   // appropriate method depending on value of the given VerifyOption
1627   // parameter. The options for that parameter are:
1628   //
1629   // vo == UsePrevMarking -> use "prev" marking information,
1630   // vo == UseNextMarking -> use "next" marking information,
1631   // vo == UseMarkWord    -> use mark word from object header
1632 
1633   bool is_obj_dead_cond(const oop obj,
1634                         const HeapRegion* hr,
1635                         const VerifyOption vo) const {
1636     switch (vo) {
1637     case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
1638     case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
1639     case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
1640     default:                            ShouldNotReachHere();
1641     }
1642     return false; // keep some compilers happy
1643   }
1644 
1645   bool is_obj_dead_cond(const oop obj,
1646                         const VerifyOption vo) const {
1647     switch (vo) {
1648     case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
1649     case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
1650     case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
1651     default:                            ShouldNotReachHere();
1652     }
1653     return false; // keep some compilers happy
1654   }
1655 
1656   bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
1657   HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
1658   bool is_marked(oop obj, VerifyOption vo);
1659   const char* top_at_mark_start_str(VerifyOption vo);
1660 
1661   // The following is just to alert the verification code
1662   // that a full collection has occurred and that the
1663   // remembered sets are no longer up to date.
1664   bool _full_collection;
1665   void set_full_collection() { _full_collection = true;}
1666   void clear_full_collection() {_full_collection = false;}
1667   bool full_collection() {return _full_collection;}
1668 
1669   ConcurrentMark* concurrent_mark() const { return _cm; }
1670   ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1671 
1672   // The dirty cards region list is used to record a subset of regions
1673   // whose cards need clearing. The list is populated during the
1674   // remembered set scanning and drained during the card table
1675   // cleanup. Although the methods are reentrant, population/draining
1676   // phases must not overlap. For synchronization purposes the last
1677   // element on the list points to itself.
1678   HeapRegion* _dirty_cards_region_list;
1679   void push_dirty_cards_region(HeapRegion* hr);
1680   HeapRegion* pop_dirty_cards_region();
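Aside: the self-pointing tail mentioned above lets a pusher distinguish "not on the list" (next is NULL) from "on the list and currently last" (next points to itself), so membership can be tested and claimed with a single CAS. The standalone sketch below illustrates one way such a list can work; DirtyCardsList and ToyRegion are simplified stand-ins and this is not the HotSpot implementation.

#include <atomic>
#include <cstdio>

struct ToyRegion {
  std::atomic<ToyRegion*> next_dirty{nullptr};  // nullptr: not on the list
  int id = 0;
};

struct DirtyCardsList {
  std::atomic<ToyRegion*> head{nullptr};

  void push(ToyRegion* hr) {
    // Claim the region by swinging its next pointer from nullptr to itself;
    // a concurrent pusher of the same region fails the CAS and sees that it
    // is already queued.
    ToyRegion* expected = nullptr;
    if (!hr->next_dirty.compare_exchange_strong(expected, hr)) {
      return;  // already on the list
    }
    // Ordinary lock-free push onto the head; if the list was empty the
    // region keeps pointing at itself, marking it as the last element.
    ToyRegion* old_head = head.load();
    do {
      hr->next_dirty.store(old_head == nullptr ? hr : old_head);
    } while (!head.compare_exchange_weak(old_head, hr));
  }

  ToyRegion* pop() {
    // Per the comment above, population and draining do not overlap, so a
    // simple head CAS suffices here (no ABA concerns in this sketch).
    ToyRegion* hr = head.load();
    while (hr != nullptr) {
      ToyRegion* next = hr->next_dirty.load();
      ToyRegion* new_head = (next == hr) ? nullptr : next;  // self: last element
      if (head.compare_exchange_weak(hr, new_head)) {
        hr->next_dirty.store(nullptr);  // off the list again
        return hr;
      }
      // CAS failure reloaded hr; retry with the new head.
    }
    return nullptr;
  }
};

int main() {
  ToyRegion a, b;
  a.id = 1;
  b.id = 2;
  DirtyCardsList list;
  list.push(&a);
  list.push(&b);
  list.push(&a);  // no-op: already queued
  for (ToyRegion* r = list.pop(); r != nullptr; r = list.pop()) {
    printf("drained region %d\n", r->id);
  }
  return 0;
}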