
src/share/vm/gc/g1/g1ConcurrentMark.inline.hpp

rev 13280 : imported patch 8184346-cleanup-g1cmbitmap
rev 13282 : imported patch 8184346-erikd-mgerdin-review
rev 13283 : imported patch 8184346-erikd-review
rev 13284 : [mq]: 8184346-ashipilev-2


  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_INLINE_HPP
  26 #define SHARE_VM_GC_G1_G1CONCURRENTMARK_INLINE_HPP
  27 
  28 #include "gc/g1/g1CollectedHeap.inline.hpp"
  29 #include "gc/g1/g1ConcurrentMark.hpp"
  30 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
  31 #include "gc/g1/suspendibleThreadSet.hpp"
  32 #include "gc/shared/taskqueue.inline.hpp"
  33 #include "utilities/bitMap.inline.hpp"
  34 
  35 inline bool G1ConcurrentMark::par_mark(oop obj) {
  36   return _nextMarkBitMap->par_mark((HeapWord*)obj);
  37 }
  38 
  39 inline bool G1CMBitMap::iterate(G1CMBitMapClosure* cl, MemRegion mr) {
  40   assert(!mr.is_empty(), "Does not support empty memregion to iterate over");
  41   assert(_covered.contains(mr),
  42          "Given MemRegion from " PTR_FORMAT " to " PTR_FORMAT " not contained in heap area",
  43          p2i(mr.start()), p2i(mr.end()));
  44 
  45   BitMap::idx_t const end_offset = addr_to_offset(mr.end());
  46   BitMap::idx_t offset = _bm.get_next_one_offset(addr_to_offset(mr.start()), end_offset);
  47 
  48   while (offset < end_offset) {
  49     HeapWord* const addr = offset_to_addr(offset);
  50     if (!cl->do_addr(addr)) {
  51       return false;
  52     }
  53     size_t const obj_size = (size_t)((oop)addr)->size();
  54     offset = _bm.get_next_one_offset(offset + (obj_size >> _shifter), end_offset);
  55   }
  56   return true;
  57 }
  58 
  59 inline HeapWord* G1CMBitMap::get_next_marked_addr(const HeapWord* addr,
  60                                                   const HeapWord* limit) const {
  61   assert(limit != NULL, "limit must not be NULL");
  62   // Round addr up to a possible object boundary to be safe.
  63   size_t const addr_offset = addr_to_offset(align_up(addr, HeapWordSize << _shifter));
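As a reading aid for the review, here is a minimal, self-contained C++ sketch of the iteration pattern that G1CMBitMap::iterate() above implements: find the next set bit, translate it back to an address, hand it to a closure, and restart the search past the end of the object so interior words are never revisited. All names here (MarkBitMap, iterate_marked, obj_size_in_words) are hypothetical and only model the idea; this is not HotSpot code.

    #include <cstddef>
    #include <vector>

    // Hypothetical stand-in for the marking bitmap: one bit per heap word
    // (i.e. the sketch assumes a shifter of 0).
    struct MarkBitMap {
      std::vector<bool> bits;
      size_t find_next_set(size_t from, size_t limit) const {
        while (from < limit && !bits[from]) { ++from; }
        return from;                          // == limit when nothing is set
      }
    };

    // Returns false if the closure aborts, true once the whole range has been
    // walked, mirroring the contract of G1CMBitMapClosure::do_addr().
    template <typename Closure, typename SizeFn>
    bool iterate_marked(const MarkBitMap& bm, size_t start, size_t end,
                        Closure do_addr, SizeFn obj_size_in_words) {
      size_t offset = bm.find_next_set(start, end);
      while (offset < end) {
        if (!do_addr(offset)) {
          return false;                       // closure asked to abort the walk
        }
        // Skip over the object that starts at 'offset' before searching again,
        // analogous to 'offset + (obj_size >> _shifter)' in the real code.
        offset = bm.find_next_set(offset + obj_size_in_words(offset), end);
      }
      return true;
    }

The skip step is what makes the walk linear in the number of marked objects rather than in the number of set bits scanned one by one; the real code obtains the object size from the oop found at the marked address.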

