src/share/vm/gc_implementation/g1/concurrentMark.hpp

rev 6802 : imported patch refactor-heapregionseq
rev 6805 : imported patch commit-uncommit-within-heap
rev 6807 : imported patch bengt-suggestions


  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
  27 
  28 #include "classfile/javaClasses.hpp"
  29 #include "gc_implementation/g1/heapRegionSet.hpp"

  30 #include "gc_implementation/shared/gcId.hpp"
  31 #include "utilities/taskqueue.hpp"
  32 
  33 class G1CollectedHeap;

  34 class CMTask;
  35 typedef GenericTaskQueue<oop, mtGC>            CMTaskQueue;
  36 typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;
  37 
  38 // Closure used by CM during concurrent reference discovery
  39 // and reference processing (during remarking) to determine
  40 // if a particular object is alive. It is primarily used
  41 // to determine if referents of discovered reference objects
  42 // are alive. An instance is also embedded into the
  43 // reference processor as the _is_alive_non_header field
  44 class G1CMIsAliveClosure: public BoolObjectClosure {
  45   G1CollectedHeap* _g1;
  46  public:
  47   G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
  48 
  49   bool do_object_b(oop obj);
  50 };
  51 
  52 // A generic CM bit map.  This is essentially a wrapper around the BitMap
  53 // class, with one bit per (1<<_shifter) HeapWords.
  54 
  55 class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
  56  protected:
  57   HeapWord* _bmStartWord;      // base address of range covered by map
  58   size_t    _bmWordSize;       // map size (in #HeapWords covered)
  59   const int _shifter;          // map to char or bit
  60   VirtualSpace _virtual_space; // underlying the bit map
  61   BitMap    _bm;               // the bit map itself
  62 
  63  public:
  64   // constructor
  65   CMBitMapRO(int shifter);
  66 
  67   enum { do_yield = true };
  68 
  69   // inquiries
  70   HeapWord* startWord()   const { return _bmStartWord; }
  71   size_t    sizeInWords() const { return _bmWordSize;  }
  72   // the following is one past the last word in space
  73   HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }
  74 
  75   // read marks
  76 
  77   bool isMarked(HeapWord* addr) const {
  78     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
  79            "outside underlying space?");
  80     return _bm.at(heapWordToOffset(addr));


  98   // conversion utilities
  99   HeapWord* offsetToHeapWord(size_t offset) const {
 100     return _bmStartWord + (offset << _shifter);
 101   }
 102   size_t heapWordToOffset(const HeapWord* addr) const {
 103     return pointer_delta(addr, _bmStartWord) >> _shifter;
 104   }
 105   int heapWordDiffToOffsetDiff(size_t diff) const;
 106 
 107   // The argument addr should be the start address of a valid object
 108   HeapWord* nextObject(HeapWord* addr) {
 109     oop obj = (oop) addr;
 110     HeapWord* res =  addr + obj->size();
 111     assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity");
 112     return res;
 113   }
 114 
 115   void print_on_error(outputStream* st, const char* prefix) const;
 116 
 117   // debugging
 118   NOT_PRODUCT(bool covers(ReservedSpace rs) const;)











 119 };
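
The conversion utilities above are plain shift arithmetic: with one bit per (1 << _shifter) HeapWords, an aligned address maps to a bit offset and back without loss, which is exactly what the sanity assert in nextObject() checks. A minimal stand-alone model of that mapping (hypothetical names, word indices instead of HeapWord pointers):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Toy model of the CMBitMapRO address<->bit-offset mapping; "addresses" are
// plain word indices here rather than HeapWord pointers.
struct BitMapModel {
  uintptr_t start_word;  // first heap word covered by the map
  int       shifter;     // log2(HeapWords per bit)

  size_t word_to_offset(uintptr_t addr_word) const {
    return (addr_word - start_word) >> shifter;   // mirrors heapWordToOffset()
  }
  uintptr_t offset_to_word(size_t offset) const {
    return start_word + (offset << shifter);      // mirrors offsetToHeapWord()
  }
};

int main() {
  BitMapModel bm = { /* start_word */ 1024, /* shifter */ 3 };  // 1 bit per 8 words
  assert(bm.offset_to_word(bm.word_to_offset(1040)) == 1040);   // aligned address round-trips
  return 0;
}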
 120 
 121 class CMBitMap : public CMBitMapRO {


 122 
 123  public:
 124   // constructor
 125   CMBitMap(int shifter) :
 126     CMBitMapRO(shifter) {}











 127 
 128   // Allocates the back store for the marking bitmap
 129   bool allocate(ReservedSpace heap_rs);
 130 
 131   // write marks
 132   void mark(HeapWord* addr) {
 133     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
 134            "outside underlying space?");
 135     _bm.set_bit(heapWordToOffset(addr));
 136   }
 137   void clear(HeapWord* addr) {
 138     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
 139            "outside underlying space?");
 140     _bm.clear_bit(heapWordToOffset(addr));
 141   }
 142   bool parMark(HeapWord* addr) {
 143     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
 144            "outside underlying space?");
 145     return _bm.par_set_bit(heapWordToOffset(addr));
 146   }
 147   bool parClear(HeapWord* addr) {
 148     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
 149            "outside underlying space?");
 150     return _bm.par_clear_bit(heapWordToOffset(addr));
 151   }
 152   void markRange(MemRegion mr);
 153   void clearAll();
 154   void clearRange(MemRegion mr);
 155 
 156   // Starting at the bit corresponding to "addr" (inclusive), find the next
 157   // "1" bit, if any.  This bit starts some run of consecutive "1"'s; find
 158   // the end of this run (stopping at "end_addr").  Return the MemRegion
 159   // covering from the start of the region corresponding to the first bit
 160   // of the run to the end of the region corresponding to the last bit of
 161   // the run.  If there is no "1" bit at or after "addr", return an empty
 162   // MemRegion.
 163   MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);



 164 };
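
The contract spelled out for getAndClearMarkedRegion() (find the next run of consecutive "1" bits, clear it, and report the range it covers) can be illustrated with a small stand-alone sketch; std::vector<bool> stands in for the BitMap and plain indices stand in for heap addresses:

#include <cstddef>
#include <utility>
#include <vector>

// Toy model of the getAndClearMarkedRegion() contract, not the BitMap API:
// locate the first run of consecutive "1" bits in [lo, hi), clear it, and
// return its half-open bounds; {0, 0} plays the role of the empty MemRegion.
static std::pair<size_t, size_t> get_and_clear_run(std::vector<bool>& bits,
                                                   size_t lo, size_t hi) {
  size_t start = lo;
  while (start < hi && !bits[start]) start++;            // first "1" bit at or after lo
  if (start == hi) return std::pair<size_t, size_t>(0, 0); // no "1" bit: empty region
  size_t end = start;
  while (end < hi && bits[end]) {                        // extend over the run of "1"s
    bits[end] = false;                                   // and clear it as we go
    ++end;
  }
  return std::pair<size_t, size_t>(start, end);          // [start, end) covers the run
}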
 165 
 166 // Represents a marking stack used by ConcurrentMarking in the G1 collector.
 167 class CMMarkStack VALUE_OBJ_CLASS_SPEC {
 168   VirtualSpace _virtual_space;   // Underlying backing store for actual stack
 169   ConcurrentMark* _cm;
 170   oop* _base;        // bottom of stack
 171   jint _index;       // one more than last occupied index
 172   jint _capacity;    // max #elements
 173   jint _saved_index; // value of _index saved at start of GC
 174   NOT_PRODUCT(jint _max_depth;)   // max depth plumbed during run
 175 
 176   bool  _overflow;
 177   bool  _should_expand;
 178   DEBUG_ONLY(bool _drain_in_progress;)
 179   DEBUG_ONLY(bool _drain_in_progress_yields;)
 180 
 181  public:
 182   CMMarkStack(ConcurrentMark* cm);
 183   ~CMMarkStack();


 663   void clear_concurrent_marking_in_progress() {
 664     _concurrent_marking_in_progress = false;
 665   }
 666 
 667   void update_accum_task_vtime(int i, double vtime) {
 668     _accum_task_vtime[i] += vtime;
 669   }
 670 
 671   double all_task_accum_vtime() {
 672     double ret = 0.0;
 673     for (uint i = 0; i < _max_worker_id; ++i)
 674       ret += _accum_task_vtime[i];
 675     return ret;
 676   }
 677 
 678   // Attempts to steal an object from the task queues of other tasks
 679   bool try_stealing(uint worker_id, int* hash_seed, oop& obj) {
 680     return _task_queues->steal(worker_id, hash_seed, obj);
 681   }
 682 
 683   ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
 684   ~ConcurrentMark();
 685 
 686   ConcurrentMarkThread* cmThread() { return _cmThread; }
 687 
 688   CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
 689   CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }
 690 
 691   // Returns the number of GC threads to be used in a concurrent
 692   // phase based on the number of GC threads being used in a STW
 693   // phase.
 694   uint scale_parallel_threads(uint n_par_threads);
 695 
 696   // Calculates the number of GC threads to be used in a concurrent phase.
 697   uint calc_parallel_marking_threads();
 698 
 699   // The following three are interaction between CM and
 700   // G1CollectedHeap
 701 
 702   // This notifies CM that a root during initial-mark needs to be
 703   // grayed. It is MT-safe. word_size is the size of the object in


 719   // G1PrintReachableBaseFile + "." + str.
 720   // vo decides whether the prev (vo == UsePrevMarking), the next
 721   // (vo == UseNextMarking) marking information, or the mark word
 722   // (vo == UseMarkWord) will be used to determine the liveness of
 723   // each object / referent.
 724   // If all is true, all objects in the heap will be dumped, otherwise
 725   // only the live ones. In the dump the following symbols / abbreviations
 726   // are used:
 727   //   M : an explicitly live object (its bitmap bit is set)
 728   //   > : an implicitly live object (over tams)
 729   //   O : an object outside the G1 heap (typically: in the perm gen)
 730   //   NOT : a reference field whose referent is not live
 731   //   AND MARKED : indicates that an object is both explicitly and
 732   //   implicitly live (it should be one or the other, not both)
 733   void print_reachable(const char* str,
 734                        VerifyOption vo, bool all) PRODUCT_RETURN;
 735 
 736   // Clear the next marking bitmap (will be called concurrently).
 737   void clearNextBitmap();
 738 
 739   // Return whether the next mark bitmap has no marks set.

 740   bool nextMarkBitmapIsClear();
 741 
 742   // These two do the work that needs to be done before and after the
 743   // initial root checkpoint. Since this checkpoint can be done at two
 744   // different points (i.e. an explicit pause or piggy-backed on a
 745   // young collection), it's nice to be able to easily share the
 746   // pre/post code. It might be the case that we can put everything in
 747   // the post method. TP
 748   void checkpointRootsInitialPre();
 749   void checkpointRootsInitialPost();
 750 
 751   // Scan all the root regions and mark everything reachable from
 752   // them.
 753   void scanRootRegions();
 754 
 755   // Scan a single root region and mark everything reachable from it.
 756   void scanRootRegion(HeapRegion* hr, uint worker_id);
 757 
 758   // Do concurrent phase of marking, to a tentative transitive closure.
 759   void markFromRoots();


 776 
 777   // Notify data structures that a GC has started.
 778   void note_start_of_gc() {
 779     _markStack.note_start_of_gc();
 780   }
 781 
 782   // Notify data structures that a GC is finished.
 783   void note_end_of_gc() {
 784     _markStack.note_end_of_gc();
 785   }
 786 
 787   // Verify that there are no CSet oops on the stacks (taskqueues /
 788   // global mark stack), enqueued SATB buffers, per-thread SATB
 789   // buffers, and fingers (global / per-task). The boolean parameters
 790   // decide which of the above data structures to verify. If marking
 791   // is not in progress, it's a no-op.
 792   void verify_no_cset_oops(bool verify_stacks,
 793                            bool verify_enqueued_buffers,
 794                            bool verify_thread_buffers,
 795                            bool verify_fingers) PRODUCT_RETURN;
 796 
 797   // It is called at the end of an evacuation pause during marking so
 798   // that CM is notified of where the new end of the heap is. It
 799   // doesn't do anything if concurrent_marking_in_progress() is false,
 800   // unless the force parameter is true.
 801   void update_heap_boundaries(MemRegion bounds, bool force = false);
 802 
 803   bool isMarked(oop p) const {
 804     assert(p != NULL && p->is_oop(), "expected an oop");
 805     HeapWord* addr = (HeapWord*)p;
 806     assert(addr >= _nextMarkBitMap->startWord() &&
 807            addr < _nextMarkBitMap->endWord(), "in a region");
 808 
 809     return _nextMarkBitMap->isMarked(addr);
 810   }
 811 
 812   inline bool not_yet_marked(oop p) const;
 813 
 814   // XXX Debug code
 815   bool containing_card_is_marked(void* p);
 816   bool containing_cards_are_marked(void* start, void* last);
 817 
 818   bool isPrevMarked(oop p) const {
 819     assert(p != NULL && p->is_oop(), "expected an oop");
 820     HeapWord* addr = (HeapWord*)p;
 821     assert(addr >= _prevMarkBitMap->startWord() &&




  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
  27 
  28 #include "classfile/javaClasses.hpp"
  29 #include "gc_implementation/g1/heapRegionSet.hpp"
  30 #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
  31 #include "gc_implementation/shared/gcId.hpp"
  32 #include "utilities/taskqueue.hpp"
  33 
  34 class G1CollectedHeap;
  35 class CMBitMap;
  36 class CMTask;
  37 typedef GenericTaskQueue<oop, mtGC>            CMTaskQueue;
  38 typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;
  39 
  40 // Closure used by CM during concurrent reference discovery
  41 // and reference processing (during remarking) to determine
  42 // if a particular object is alive. It is primarily used
  43 // to determine if referents of discovered reference objects
  44 // are alive. An instance is also embedded into the
  45 // reference processor as the _is_alive_non_header field
  46 class G1CMIsAliveClosure: public BoolObjectClosure {
  47   G1CollectedHeap* _g1;
  48  public:
  49   G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
  50 
  51   bool do_object_b(oop obj);
  52 };
  53 
  54 // A generic CM bit map.  This is essentially a wrapper around the BitMap
  55 // class, with one bit per (1<<_shifter) HeapWords.
  56 
  57 class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
  58  protected:
  59   HeapWord* _bmStartWord;      // base address of range covered by map
  60   size_t    _bmWordSize;       // map size (in #HeapWords covered)
  61   const int _shifter;          // map to char or bit

  62   BitMap    _bm;               // the bit map itself
  63 
  64  public:
  65   // constructor
  66   CMBitMapRO(int shifter);
  67 
  68   enum { do_yield = true };
  69 
  70   // inquiries
  71   HeapWord* startWord()   const { return _bmStartWord; }
  72   size_t    sizeInWords() const { return _bmWordSize;  }
  73   // the following is one past the last word in space
  74   HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }
  75 
  76   // read marks
  77 
  78   bool isMarked(HeapWord* addr) const {
  79     assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
  80            "outside underlying space?");
  81     return _bm.at(heapWordToOffset(addr));


  99   // conversion utilities
 100   HeapWord* offsetToHeapWord(size_t offset) const {
 101     return _bmStartWord + (offset << _shifter);
 102   }
 103   size_t heapWordToOffset(const HeapWord* addr) const {
 104     return pointer_delta(addr, _bmStartWord) >> _shifter;
 105   }
 106   int heapWordDiffToOffsetDiff(size_t diff) const;
 107 
 108   // The argument addr should be the start address of a valid object
 109   HeapWord* nextObject(HeapWord* addr) {
 110     oop obj = (oop) addr;
 111     HeapWord* res =  addr + obj->size();
 112     assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity");
 113     return res;
 114   }
 115 
 116   void print_on_error(outputStream* st, const char* prefix) const;
 117 
 118   // debugging
 119   NOT_PRODUCT(bool covers(MemRegion rs) const;)
 120 };
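
nextObject() above relies on its argument being the start of a valid object, which is what makes a simple object-by-object walk over a contiguous range possible. A hedged sketch of such a walk (do_object() is a placeholder, not part of CMBitMapRO):

// Sketch only: walk the objects in [bottom, top), assuming every address
// visited is an object start, exactly as nextObject() requires.
static void walk_objects(CMBitMapRO* bm, HeapWord* bottom, HeapWord* top) {
  HeapWord* cur = bottom;
  while (cur < top) {
    // do_object(oop(cur));         // placeholder for per-object work
    cur = bm->nextObject(cur);      // advance by the object's size
  }
}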
 121 
 122 class CMBitMapMappingChangedListener : public G1MappingChangedListener {
 123  private:
 124   CMBitMap* _bm;
 125  public:
 126   CMBitMapMappingChangedListener() : _bm(NULL) {}
 127 
 128   void set_bitmap(CMBitMap* bm) { _bm = bm; }
 129 
 130   virtual void on_commit(uint start_idx, size_t num_regions);
 131 };
 132 
 133 class CMBitMap : public CMBitMapRO {
 134  private:
 135   CMBitMapMappingChangedListener _listener;
 136 
 137  public:
 138   static size_t compute_size(size_t heap_size);
 139   // Returns the amount of bytes on the heap between two marks in the bitmap.
 140   static size_t mark_distance();
 141 
 142   CMBitMap() : CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
 143 
 144   // Initializes the underlying BitMap to cover the given area.
 145   void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
 146 
 147   // Write marks.
 148   inline void mark(HeapWord* addr);
 149   inline void clear(HeapWord* addr);
 150   inline bool parMark(HeapWord* addr);
 151   inline bool parClear(HeapWord* addr);
 152 
























 153   void markRange(MemRegion mr);

 154   void clearRange(MemRegion mr);
 155 
 156   // Starting at the bit corresponding to "addr" (inclusive), find the next
 157   // "1" bit, if any.  This bit starts some run of consecutive "1"'s; find
 158   // the end of this run (stopping at "end_addr").  Return the MemRegion
 159   // covering from the start of the region corresponding to the first bit
 160   // of the run to the end of the region corresponding to the last bit of
 161   // the run.  If there is no "1" bit at or after "addr", return an empty
 162   // MemRegion.
 163   MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
 164 
 165   // Clear the whole mark bitmap.
 166   void clearAll();
 167 };
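
compute_size() and mark_distance() above tie the bitmap's footprint to the heap size. A rough sizing check, assuming one bitmap bit per 8-byte-aligned heap word on a 64-bit VM (an assumption for illustration, not read out of the implementation):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t heap_bytes    = 32ULL * 1024 * 1024 * 1024; // example: 32 GB heap
  const size_t bytes_per_bit = 8;                          // assumed object alignment in bytes
  const size_t bits_per_byte = 8;
  const size_t bitmap_bytes  = heap_bytes / (bytes_per_bit * bits_per_byte);
  printf("mark bitmap: %zu MB each for the prev and next bitmaps\n",
         bitmap_bytes / (1024 * 1024));                    // prints 512 MB
  return 0;
}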
 168 
 169 // Represents a marking stack used by ConcurrentMarking in the G1 collector.
 170 class CMMarkStack VALUE_OBJ_CLASS_SPEC {
 171   VirtualSpace _virtual_space;   // Underlying backing store for actual stack
 172   ConcurrentMark* _cm;
 173   oop* _base;        // bottom of stack
 174   jint _index;       // one more than last occupied index
 175   jint _capacity;    // max #elements
 176   jint _saved_index; // value of _index saved at start of GC
 177   NOT_PRODUCT(jint _max_depth;)   // max depth plumbed during run
 178 
 179   bool  _overflow;
 180   bool  _should_expand;
 181   DEBUG_ONLY(bool _drain_in_progress;)
 182   DEBUG_ONLY(bool _drain_in_progress_yields;)
 183 
 184  public:
 185   CMMarkStack(ConcurrentMark* cm);
 186   ~CMMarkStack();
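
The fields above describe a plain bounded array stack: _base is the bottom, _index is one past the last occupied slot, _capacity bounds it, and _overflow records a failed push. A toy, single-threaded sketch of that push contract (hypothetical helper, not CMMarkStack code):

// Toy, single-threaded sketch of the push contract; the real stack pushes
// with atomics and can also expand, which this helper ignores.
static bool toy_push(oop* base, jint* index, jint capacity,
                     bool* overflow, oop obj) {
  if (*index >= capacity) {
    *overflow = true;           // remember that we ran out of room
    return false;
  }
  base[(*index)++] = obj;       // _index stays one past the last occupied slot
  return true;
}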


 666   void clear_concurrent_marking_in_progress() {
 667     _concurrent_marking_in_progress = false;
 668   }
 669 
 670   void update_accum_task_vtime(int i, double vtime) {
 671     _accum_task_vtime[i] += vtime;
 672   }
 673 
 674   double all_task_accum_vtime() {
 675     double ret = 0.0;
 676     for (uint i = 0; i < _max_worker_id; ++i)
 677       ret += _accum_task_vtime[i];
 678     return ret;
 679   }
 680 
 681   // Attempts to steal an object from the task queues of other tasks
 682   bool try_stealing(uint worker_id, int* hash_seed, oop& obj) {
 683     return _task_queues->steal(worker_id, hash_seed, obj);
 684   }
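
try_stealing() above is the hook a marking task falls back on once its own queue runs dry. A hedged sketch of the usual drain-then-steal loop around it (process() is a placeholder for the per-object marking work, not ConcurrentMark API):

// Sketch of a drain-then-steal loop over the CMTaskQueue typedef declared
// near the top of this file; process() is hypothetical.
static void process(oop obj);   // placeholder for per-object marking work

static void drain_and_steal(CMTaskQueue* my_queue, ConcurrentMark* cm,
                            uint worker_id, int* hash_seed) {
  oop obj;
  for (;;) {
    while (my_queue->pop_local(obj)) {          // drain local work first
      process(obj);
    }
    if (!cm->try_stealing(worker_id, hash_seed, obj)) {
      break;                                    // no work left to steal (or time to terminate)
    }
    process(obj);                               // handle the stolen object, then drain again
  }
}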
 685 
 686   ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage);
 687   ~ConcurrentMark();
 688 
 689   ConcurrentMarkThread* cmThread() { return _cmThread; }
 690 
 691   CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
 692   CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }
 693 
 694   // Returns the number of GC threads to be used in a concurrent
 695   // phase based on the number of GC threads being used in a STW
 696   // phase.
 697   uint scale_parallel_threads(uint n_par_threads);
 698 
 699   // Calculates the number of GC threads to be used in a concurrent phase.
 700   uint calc_parallel_marking_threads();
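
A typical heuristic for this scaling (an assumption for illustration, not the body of scale_parallel_threads()) is to hand the concurrent phase roughly a quarter of the STW worker count, but never less than one thread:

// Illustrative scaling heuristic only; the real policy also honors flags such
// as ConcGCThreads, which this sketch ignores.
static uint scale_parallel_threads_sketch(uint n_par_threads) {
  uint n = (n_par_threads + 2) / 4;   // roughly 25% of the STW workers
  return (n > 0) ? n : 1;             // always run with at least one thread
}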
 701 
 702   // The following three are interaction between CM and
 703   // G1CollectedHeap
 704 
 705   // This notifies CM that a root during initial-mark needs to be
 706   // grayed. It is MT-safe. word_size is the size of the object in


 722   // G1PrintReachableBaseFile + "." + str.
 723   // vo decides whether the prev (vo == UsePrevMarking), the next
 724   // (vo == UseNextMarking) marking information, or the mark word
 725   // (vo == UseMarkWord) will be used to determine the liveness of
 726   // each object / referent.
 727   // If all is true, all objects in the heap will be dumped, otherwise
 728   // only the live ones. In the dump the following symbols / abbreviations
 729   // are used:
 730   //   M : an explicitly live object (its bitmap bit is set)
 731   //   > : an implicitly live object (over tams)
 732   //   O : an object outside the G1 heap (typically: in the perm gen)
 733   //   NOT : a reference field whose referent is not live
 734   //   AND MARKED : indicates that an object is both explicitly and
 735   //   implicitly live (it should be one or the other, not both)
 736   void print_reachable(const char* str,
 737                        VerifyOption vo, bool all) PRODUCT_RETURN;
 738 
 739   // Clear the next marking bitmap (will be called concurrently).
 740   void clearNextBitmap();
 741 
 742   // Return whether the next mark bitmap has no marks set. To be used for assertions
 743   // only. Will not yield to pause requests.
 744   bool nextMarkBitmapIsClear();
 745 
 746   // These two do the work that needs to be done before and after the
 747   // initial root checkpoint. Since this checkpoint can be done at two
 748   // different points (i.e. an explicit pause or piggy-backed on a
 749   // young collection), it's nice to be able to easily share the
 750   // pre/post code. It might be the case that we can put everything in
 751   // the post method. TP
 752   void checkpointRootsInitialPre();
 753   void checkpointRootsInitialPost();
 754 
 755   // Scan all the root regions and mark everything reachable from
 756   // them.
 757   void scanRootRegions();
 758 
 759   // Scan a single root region and mark everything reachable from it.
 760   void scanRootRegion(HeapRegion* hr, uint worker_id);
 761 
 762   // Do concurrent phase of marking, to a tentative transitive closure.
 763   void markFromRoots();
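
The phase methods above are driven in a fixed order by the concurrent marking thread; a hedged outline of that sequencing (illustrative sketch, not ConcurrentMarkThread code):

// Rough outline of one concurrent cycle using the phase methods above;
// the pauses themselves are triggered by the VM thread, not by this sketch.
static void concurrent_cycle_outline(ConcurrentMark* cm) {
  // initial-mark pause: checkpointRootsInitialPre()/Post() run inside the STW pause
  cm->scanRootRegions();   // scan root regions before the next evacuation pause
  cm->markFromRoots();     // concurrent phase: build a tentative transitive closure
  // remark and cleanup pauses follow; clearNextBitmap() then runs concurrently
  // to prepare the next bitmap for the following cycle
}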


 780 
 781   // Notify data structures that a GC has started.
 782   void note_start_of_gc() {
 783     _markStack.note_start_of_gc();
 784   }
 785 
 786   // Notify data structures that a GC is finished.
 787   void note_end_of_gc() {
 788     _markStack.note_end_of_gc();
 789   }
 790 
 791   // Verify that there are no CSet oops on the stacks (taskqueues /
 792   // global mark stack), enqueued SATB buffers, per-thread SATB
 793   // buffers, and fingers (global / per-task). The boolean parameters
 794   // decide which of the above data structures to verify. If marking
 795   // is not in progress, it's a no-op.
 796   void verify_no_cset_oops(bool verify_stacks,
 797                            bool verify_enqueued_buffers,
 798                            bool verify_thread_buffers,
 799                            bool verify_fingers) PRODUCT_RETURN;






 800 
 801   bool isMarked(oop p) const {
 802     assert(p != NULL && p->is_oop(), "expected an oop");
 803     HeapWord* addr = (HeapWord*)p;
 804     assert(addr >= _nextMarkBitMap->startWord() &&
 805            addr < _nextMarkBitMap->endWord(), "in a region");
 806 
 807     return _nextMarkBitMap->isMarked(addr);
 808   }
 809 
 810   inline bool not_yet_marked(oop p) const;
 811 
 812   // XXX Debug code
 813   bool containing_card_is_marked(void* p);
 814   bool containing_cards_are_marked(void* start, void* last);
 815 
 816   bool isPrevMarked(oop p) const {
 817     assert(p != NULL && p->is_oop(), "expected an oop");
 818     HeapWord* addr = (HeapWord*)p;
 819     assert(addr >= _prevMarkBitMap->startWord() ||