< prev index next >

src/share/vm/gc/shenandoah/shenandoahHeap.inline.hpp

Print this page
rev 12551 : Refactor/consolidate/cleanup


  25 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  26 
  27 #include "gc/shared/cmBitMap.inline.hpp"
  28 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
  29 #include "gc/shenandoah/brooksPointer.inline.hpp"
  30 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  31 #include "gc/shenandoah/shenandoahHeap.hpp"
  32 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  33 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  34 #include "oops/oop.inline.hpp"
  35 #include "runtime/atomic.hpp"
  36 #include "runtime/prefetch.hpp"
  37 #include "runtime/prefetch.inline.hpp"
  38 #include "utilities/copy.hpp"
  39 
  40 /*
  41  * Marks the object. Returns true if the object has not been marked before and has
  42  * been marked by this thread. Returns false if the object has already been marked,
  43  * or if a competing thread succeeded in marking this object.
  44  */
  45 inline bool ShenandoahHeap::mark_current(oop obj) const {
  46 #ifdef ASSERT
  47   if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))) {
  48     tty->print_cr("heap region containing obj:");
  49     ShenandoahHeapRegion* obj_region = heap_region_containing(obj);
  50     obj_region->print();
  51     tty->print_cr("heap region containing forwardee:");
  52     ShenandoahHeapRegion* forward_region = heap_region_containing(oopDesc::bs()->read_barrier(obj));
  53     forward_region->print();
  54   }
  55 #endif
  56 
  57   assert(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "only mark forwarded copy of objects");
  58   return mark_current_no_checks(obj);
  59 }
  60 
  61 inline bool ShenandoahHeap::mark_current_no_checks(oop obj) const {
  62   HeapWord* addr = (HeapWord*) obj;
  63   return (! allocated_after_mark_start(addr)) && _next_mark_bit_map->parMark(addr);
  64 }
  65 
  66 inline bool ShenandoahHeap::is_marked_current(oop obj) const {
  67   HeapWord* addr = (HeapWord*) obj;
  68   return allocated_after_mark_start(addr) || _next_mark_bit_map->isMarked(addr);
  69 }
  70 
  71 inline bool ShenandoahHeap::is_marked_current(oop obj, ShenandoahHeapRegion* r) const {
  72   HeapWord* addr = (HeapWord*) obj;
  73   return _next_mark_bit_map->isMarked(addr) || r->allocated_after_mark_start(addr);
  74 }
  75 
  76 inline bool ShenandoahHeap::is_marked_prev(oop obj) const {
  77   ShenandoahHeapRegion* r = heap_region_containing((void*) obj);
  78   return is_marked_prev(obj, r);
  79 }
  80 
  81 inline bool ShenandoahHeap::is_marked_prev(oop obj, const ShenandoahHeapRegion* r) const {
  82   HeapWord* addr = (HeapWord*) obj;
  83   return _prev_mark_bit_map->isMarked(addr) || r->allocated_after_prev_mark_start(addr);
  84 }
  85 
     // Flag accessor: true while heap references may still point into
     // from-space and a reference-update pass is pending.
  86 inline bool ShenandoahHeap::need_update_refs() const {
  87   return _need_update_refs;
  88 }
  89 
  90 inline uint ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  91   uintptr_t region_start = ((uintptr_t) addr);
  92   uintptr_t index = (region_start - (uintptr_t) _first_region_bottom) >> ShenandoahHeapRegion::RegionSizeShift;
  93 #ifdef ASSERT
  94   if (!(index < _num_regions)) {
  95     tty->print_cr("heap region does not contain address, first_region_bottom: "PTR_FORMAT", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT", region_size: "SIZE_FORMAT, p2i(_first_region_bottom), p2i(_ordered_regions->get(0)->bottom()), _num_regions, ShenandoahHeapRegion::RegionSizeBytes);
  96   }
  97 #endif
  98   assert(index < _num_regions, "heap region index must be in range");
  99   return index;
 100 }
 101 
 102 inline ShenandoahHeapRegion* ShenandoahHeap::heap_region_containing(const void* addr) const {
 103   uint index = heap_region_index_containing(addr);
 104   ShenandoahHeapRegion* result = _ordered_regions->get(index);
 105 #ifdef ASSERT
 106   if (!(addr >= result->bottom() && addr < result->end())) {
 107     tty->print_cr("heap region does not contain address, first_region_bottom: "PTR_FORMAT", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT, p2i(_first_region_bottom), p2i(_ordered_regions->get(0)->bottom()), _num_regions);
 108   }
 109 #endif
 110   assert(addr >= result->bottom() && addr < result->end(), "address must be in found region");
 111   return result;
 112 }
 113 
 114 template <class T>
 115 inline oop ShenandoahHeap::update_oop_ref_not_null(T* p, oop obj) {
 116   if (in_cset_fast_test((HeapWord*) obj)) {
 117     oop forw = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
 118     assert(! oopDesc::unsafe_equals(forw, obj) || is_full_gc_in_progress(), "expect forwarded object");
 119     obj = forw;
 120     oopDesc::encode_store_heap_oop(p, obj);
 121   }
 122 #ifdef ASSERT
 123   else {
 124     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "expect not forwarded");
 125   }
 126 #endif
 127   return obj;
 128 }
 129 
 130 template <class T>
 131 inline oop ShenandoahHeap::maybe_update_oop_ref(T* p) {
 132   T o = oopDesc::load_heap_oop(p);
 133   if (! oopDesc::is_null(o)) {
 134     oop obj = oopDesc::decode_heap_oop_not_null(o);
 135     return maybe_update_oop_ref_not_null(p, obj);
 136   } else {
 137     return NULL;
 138   }
 139 }
 140 
     // CAS n into *addr iff it still holds c; returns the value witnessed
     // at addr (equal to c on success).
 141 inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
 142   return (oop) Atomic::cmpxchg_ptr(n, addr, c);
 143 }
 144 
 145 inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
 146   narrowOop cmp = oopDesc::encode_heap_oop(c);
 147   narrowOop val = oopDesc::encode_heap_oop(n);
 148   return oopDesc::decode_heap_oop((narrowOop) Atomic::cmpxchg(val, addr, cmp));
 149 }
 150 
 151 template <class T>
 152 inline oop ShenandoahHeap::maybe_update_oop_ref_not_null(T* p, oop heap_oop) {
 153 
 154   assert((! is_in(p)) || (! heap_region_containing(p)->is_in_collection_set())
 155          || is_full_gc_in_progress(),
 156          "never update refs in from-space, unless evacuation has been cancelled");
 157 
 158 #ifdef ASSERT
 159   if (! is_in(heap_oop)) {
 160     print_heap_regions();
 161     tty->print_cr("object not in heap: "PTR_FORMAT", referenced by: "PTR_FORMAT, p2i((HeapWord*) heap_oop), p2i(p));
 162     assert(is_in(heap_oop), "object must be in heap");
 163   }
 164 #endif
 165   assert(is_in(heap_oop), "only ever call this on objects in the heap");
 166   if (in_cset_fast_test((HeapWord*) heap_oop)) {
 167     oop forwarded_oop = ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop); // read brooks ptr
 168     assert(! oopDesc::unsafe_equals(forwarded_oop, heap_oop) || is_full_gc_in_progress(), "expect forwarded object");
 169 
 170     log_develop_trace(gc)("Updating old ref: "PTR_FORMAT" pointing to "PTR_FORMAT" to new ref: "PTR_FORMAT, p2i(p), p2i(heap_oop), p2i(forwarded_oop));
 171 
 172     assert(forwarded_oop->is_oop(), "oop required");
 173     assert(is_in(forwarded_oop), "forwardee must be in heap");
 174     assert(oopDesc::bs()->is_safe(forwarded_oop), "forwardee must not be in collection set");
 175     // If this fails, another thread wrote to p before us, it will be logged in SATB and the
 176     // reference be updated later.
 177     oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
 178 
 179     if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful.
 180       return forwarded_oop;
 181     } else {
 182       return NULL;
 183     }
 184   } else {
 185     assert(oopDesc::unsafe_equals(heap_oop, ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop)), "expect not forwarded");
 186     return heap_oop;


 278     return_val = copy_val;
 279 
 280     log_develop_trace(gc, compaction)("Copy of "PTR_FORMAT" to "PTR_FORMAT" succeeded \n", p2i((HeapWord*) p), p2i(copy));
 281 
 282 #ifdef ASSERT
 283     assert(return_val->is_oop(), "expect oop");
 284     assert(p->klass() == return_val->klass(), "Should have the same class p: "PTR_FORMAT", copy: "PTR_FORMAT, p2i((HeapWord*) p), p2i((HeapWord*) copy));
 285 #endif
 286   }  else {
 287     if (alloc_from_gclab) {
 288       thread->gclab().rollback(required);
 289     }
 290     log_develop_trace(gc, compaction)("Copy of "PTR_FORMAT" to "PTR_FORMAT" failed, use other: "PTR_FORMAT, p2i((HeapWord*) p), p2i(copy), p2i((HeapWord*) result));
 291     return_val = result;
 292   }
 293 
 294   return return_val;
 295 }
 296 
 297 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
 298   return ! is_marked_current(oop(entry));








 299 }
 300 
 301 inline bool ShenandoahHeap::in_cset_fast_test(HeapWord* obj) {


 302   assert(_in_cset_fast_test != NULL, "sanity");
 303   assert(is_in(obj), "should be in heap");
 304 
 305   // no need to subtract the bottom of the heap from obj,
 306   // _in_cset_fast_test is biased
 307   uintx index = ((uintx) obj) >> ShenandoahHeapRegion::RegionSizeShift;
 308   bool ret = _in_cset_fast_test[index];
 309 
 310   // let's make sure the result is consistent with what the slower
 311   // test returns
 312   assert( ret || !is_in_collection_set(obj), "sanity");
 313   assert(!ret ||  is_in_collection_set(obj), "sanity");
 314   return ret;
 315 }
 316 
     // True while the concurrent marking phase is running.
 317 inline bool ShenandoahHeap::concurrent_mark_in_progress() {
 318   return _concurrent_mark_in_progress != 0;
 319 }
 320 
     // Address of the marking-in-progress flag, for code (e.g. generated
     // stubs) that needs to poll the raw field.
 321 inline address ShenandoahHeap::concurrent_mark_in_progress_addr() {
 322   return (address) &(ShenandoahHeap::heap()->_concurrent_mark_in_progress);
 323 }
 324 
     // True while the evacuation phase is running.
 325 inline bool ShenandoahHeap::is_evacuation_in_progress() {
 326   return _evacuation_in_progress != 0;
 327 }
 328 
 329 inline bool ShenandoahHeap::allocated_after_mark_start(HeapWord* addr) const {
 330   uintx index = ((uintx) addr) >> ShenandoahHeapRegion::RegionSizeShift;
 331   HeapWord* top_at_mark_start = _top_at_mark_starts[index];
 332   bool alloc_after_mark_start = addr >= top_at_mark_start;
 333 #ifdef ASSERT
 334   ShenandoahHeapRegion* r = heap_region_containing(addr);
 335   assert(alloc_after_mark_start == r->allocated_after_mark_start(addr), "sanity");
 336 #endif
 337   return alloc_after_mark_start;
 338 }
 339 
     // Iterates objects marked in the PREVIOUS marking of the region.
 340 template<class T>
 341 inline void ShenandoahHeap::marked_prev_object_iterate(ShenandoahHeapRegion* region, T* cl) {
 342   marked_object_iterate(region, cl, _prev_mark_bit_map, region->top_at_prev_mark_start());
 343 }
 344 
     // Iterates objects marked in the CURRENT (next) marking of the region.
 345 template<class T>
 346 inline void ShenandoahHeap::marked_next_object_iterate(ShenandoahHeapRegion* region, T* cl) {
 347   marked_object_iterate(region, cl, _next_mark_bit_map, region->top_at_mark_start());
 348 }
 349 
 350 template<class T>
 351 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, CMBitMap* mark_bit_map, HeapWord* top_at_mark_start) {
 352   assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");




 353   size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
 354   size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
 355   HeapWord* start = region->bottom() + BrooksPointer::word_size();
 356 
 357   HeapWord* limit = region->top();
 358   HeapWord* end = MIN2(top_at_mark_start + BrooksPointer::word_size(), _ordered_regions->end());
 359   HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, end);
 360 
 361   intx dist = ShenandoahMarkScanPrefetch;
 362   if (dist > 0) {
 363     // Batched scan that prefetches the oop data, anticipating the access to
 364     // either header, oop field, or forwarding pointer. Not that we cannot
 365     // touch anything in oop, while it still being prefetched to get enough
 366     // time for prefetch to work. This is why we try to scan the bitmap linearly,
 367     // disregarding the object size. However, since we know forwarding pointer
 368     // preceeds the object, we can skip over it. Once we cannot trust the bitmap,
 369     // there is no point for prefetching the oop contents, as oop->size() will
 370     // touch it prematurely.
 371 
 372     oop slots[dist];


 403     }
 404   } else {
 405     while (addr < limit) {
 406       oop obj = oop(addr);
 407       int size = obj->size();
 408       do_marked_object(mark_bit_map, cl, obj);
 409       addr += size + skip_objsize_delta;
 410       if (addr < top_at_mark_start) {
 411         addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
 412       }
 413     }
 414   }
 415 }
 416 
     // Applies the closure to one marked object, after (debug-only) sanity
     // checks that the object is non-null, well-formed, in the heap, and
     // actually marked in whichever bitmap drove the iteration.
 417 template<class T>
 418 inline void ShenandoahHeap::do_marked_object(CMBitMap* bitmap, T* cl, oop obj) {
 419 #ifdef ASSERT
 420   assert(!oopDesc::is_null(obj), "sanity");
 421   assert(obj->is_oop(), "sanity");
 422   assert(is_in(obj), "sanity");
 423   if (bitmap == _prev_mark_bit_map) {
 424     assert(is_marked_prev(obj), "object expected to be marked");
 425   } else {
 426     assert(is_marked_current(obj), "object expected to be marked");
 427   }
 428 #endif
 429   cl->do_object(obj);
 430 }
 431 
 432 #endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP


  25 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  26 
  27 #include "gc/shared/cmBitMap.inline.hpp"
  28 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
  29 #include "gc/shenandoah/brooksPointer.inline.hpp"
  30 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  31 #include "gc/shenandoah/shenandoahHeap.hpp"
  32 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  33 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  34 #include "oops/oop.inline.hpp"
  35 #include "runtime/atomic.hpp"
  36 #include "runtime/prefetch.hpp"
  37 #include "runtime/prefetch.inline.hpp"
  38 #include "utilities/copy.hpp"
  39 
  40 /*
  41  * Marks the object. Returns true if the object has not been marked before and has
  42  * been marked by this thread. Returns false if the object has already been marked,
  43  * or if a competing thread succeeded in marking this object.
  44  */
  45 inline bool ShenandoahHeap::mark_next(oop obj) const {
  46 #ifdef ASSERT
  47   if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))) {
  48     tty->print_cr("heap region containing obj:");
  49     ShenandoahHeapRegion* obj_region = heap_region_containing(obj);
  50     obj_region->print();
  51     tty->print_cr("heap region containing forwardee:");
  52     ShenandoahHeapRegion* forward_region = heap_region_containing(oopDesc::bs()->read_barrier(obj));
  53     forward_region->print();
  54   }
  55 #endif
  56 
  57   assert(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "only mark forwarded copy of objects");
  58   return mark_next_no_checks(obj);
  59 }
  60 
  61 inline bool ShenandoahHeap::mark_next_no_checks(oop obj) const {
  62   HeapWord* addr = (HeapWord*) obj;
  63   return (! allocated_after_next_mark_start(addr)) && _next_mark_bit_map->parMark(addr);
  64 }
  65 
  66 inline bool ShenandoahHeap::is_marked_next(oop obj) const {
  67   HeapWord* addr = (HeapWord*) obj;
  68   return allocated_after_next_mark_start(addr) || _next_mark_bit_map->isMarked(addr);
  69 }
  70 
  71 inline bool ShenandoahHeap::is_marked_complete(oop obj) const {
  72   HeapWord* addr = (HeapWord*) obj;
  73   return allocated_after_complete_mark_start(addr) || _complete_mark_bit_map->isMarked(addr);










  74 }
  75 
     // Flag accessor: true while heap references may still point into
     // from-space and a reference-update pass is pending.
  76 inline bool ShenandoahHeap::need_update_refs() const {
  77   return _need_update_refs;
  78 }
  79 
  80 inline uint ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  81   uintptr_t region_start = ((uintptr_t) addr);
  82   uintptr_t index = (region_start - (uintptr_t) _first_region_bottom) >> ShenandoahHeapRegion::RegionSizeShift;
  83 #ifdef ASSERT
  84   if (!(index < _num_regions)) {
  85     tty->print_cr("heap region does not contain address, first_region_bottom: "PTR_FORMAT", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT", region_size: "SIZE_FORMAT, p2i(_first_region_bottom), p2i(_ordered_regions->get(0)->bottom()), _num_regions, ShenandoahHeapRegion::RegionSizeBytes);
  86   }
  87 #endif
  88   assert(index < _num_regions, "heap region index must be in range");
  89   return index;
  90 }
  91 
  92 inline ShenandoahHeapRegion* ShenandoahHeap::heap_region_containing(const void* addr) const {
  93   uint index = heap_region_index_containing(addr);
  94   ShenandoahHeapRegion* result = _ordered_regions->get(index);
  95 #ifdef ASSERT
  96   if (!(addr >= result->bottom() && addr < result->end())) {
  97     tty->print_cr("heap region does not contain address, first_region_bottom: "PTR_FORMAT", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT, p2i(_first_region_bottom), p2i(_ordered_regions->get(0)->bottom()), _num_regions);
  98   }
  99 #endif
 100   assert(addr >= result->bottom() && addr < result->end(), "address must be in found region");
 101   return result;
 102 }
 103 
 104 template <class T>
 105 inline oop ShenandoahHeap::update_oop_ref_not_null(T* p, oop obj) {
 106   if (in_collection_set(obj)) {
 107     oop forw = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
 108     assert(! oopDesc::unsafe_equals(forw, obj) || is_full_gc_in_progress(), "expect forwarded object");
 109     obj = forw;
 110     oopDesc::encode_store_heap_oop(p, obj);
 111   }
 112 #ifdef ASSERT
 113   else {
 114     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "expect not forwarded");
 115   }
 116 #endif
 117   return obj;
 118 }
 119 
 120 template <class T>
 121 inline oop ShenandoahHeap::maybe_update_oop_ref(T* p) {
 122   T o = oopDesc::load_heap_oop(p);
 123   if (! oopDesc::is_null(o)) {
 124     oop obj = oopDesc::decode_heap_oop_not_null(o);
 125     return maybe_update_oop_ref_not_null(p, obj);
 126   } else {
 127     return NULL;
 128   }
 129 }
 130 
     // CAS n into *addr iff it still holds c; returns the value witnessed
     // at addr (equal to c on success).
 131 inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
 132   return (oop) Atomic::cmpxchg_ptr(n, addr, c);
 133 }
 134 
 135 inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
 136   narrowOop cmp = oopDesc::encode_heap_oop(c);
 137   narrowOop val = oopDesc::encode_heap_oop(n);
 138   return oopDesc::decode_heap_oop((narrowOop) Atomic::cmpxchg(val, addr, cmp));
 139 }
 140 
 141 template <class T>
 142 inline oop ShenandoahHeap::maybe_update_oop_ref_not_null(T* p, oop heap_oop) {
 143 
 144   assert((! is_in(p)) || (! in_collection_set(p))
 145          || is_full_gc_in_progress(),
 146          "never update refs in from-space, unless evacuation has been cancelled");
 147 
 148 #ifdef ASSERT
 149   if (! is_in(heap_oop)) {
 150     print_heap_regions();
 151     tty->print_cr("object not in heap: "PTR_FORMAT", referenced by: "PTR_FORMAT, p2i((HeapWord*) heap_oop), p2i(p));
 152     assert(is_in(heap_oop), "object must be in heap");
 153   }
 154 #endif
 155   assert(is_in(heap_oop), "only ever call this on objects in the heap");
 156   if (in_collection_set(heap_oop)) {
 157     oop forwarded_oop = ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop); // read brooks ptr
 158     assert(! oopDesc::unsafe_equals(forwarded_oop, heap_oop) || is_full_gc_in_progress(), "expect forwarded object");
 159 
 160     log_develop_trace(gc)("Updating old ref: "PTR_FORMAT" pointing to "PTR_FORMAT" to new ref: "PTR_FORMAT, p2i(p), p2i(heap_oop), p2i(forwarded_oop));
 161 
 162     assert(forwarded_oop->is_oop(), "oop required");
 163     assert(is_in(forwarded_oop), "forwardee must be in heap");
 164     assert(oopDesc::bs()->is_safe(forwarded_oop), "forwardee must not be in collection set");
 165     // If this fails, another thread wrote to p before us, it will be logged in SATB and the
 166     // reference be updated later.
 167     oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);
 168 
 169     if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful.
 170       return forwarded_oop;
 171     } else {
 172       return NULL;
 173     }
 174   } else {
 175     assert(oopDesc::unsafe_equals(heap_oop, ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop)), "expect not forwarded");
 176     return heap_oop;


 268     return_val = copy_val;
 269 
 270     log_develop_trace(gc, compaction)("Copy of "PTR_FORMAT" to "PTR_FORMAT" succeeded \n", p2i((HeapWord*) p), p2i(copy));
 271 
 272 #ifdef ASSERT
 273     assert(return_val->is_oop(), "expect oop");
 274     assert(p->klass() == return_val->klass(), "Should have the same class p: "PTR_FORMAT", copy: "PTR_FORMAT, p2i((HeapWord*) p), p2i((HeapWord*) copy));
 275 #endif
 276   }  else {
 277     if (alloc_from_gclab) {
 278       thread->gclab().rollback(required);
 279     }
 280     log_develop_trace(gc, compaction)("Copy of "PTR_FORMAT" to "PTR_FORMAT" failed, use other: "PTR_FORMAT, p2i((HeapWord*) p), p2i(copy), p2i((HeapWord*) result));
 281     return_val = result;
 282   }
 283 
 284   return return_val;
 285 }
 286 
 287 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
 288   return ! is_marked_next(oop(entry));
 289 }
 290 
 291 bool ShenandoahHeap::region_in_collection_set(size_t region_index) const {
 292   return _in_cset_fast_test_base[region_index];
 293 }
 294 
 295 bool ShenandoahHeap::in_collection_set(ShenandoahHeapRegion* r) const {
 296   return region_in_collection_set(r->region_number());
 297 }
 298 
 299 template <class T>
 300 inline bool ShenandoahHeap::in_collection_set(T p) const {
 301   HeapWord* obj = (HeapWord*) p;
 302   assert(_in_cset_fast_test != NULL, "sanity");
 303   assert(is_in(obj), "should be in heap");
 304 
 305   // no need to subtract the bottom of the heap from obj,
 306   // _in_cset_fast_test is biased
 307   uintx index = ((uintx) obj) >> ShenandoahHeapRegion::RegionSizeShift;
 308   return _in_cset_fast_test[index];






 309 }
 310 
     // True while the concurrent marking phase is running.
 311 inline bool ShenandoahHeap::concurrent_mark_in_progress() {
 312   return _concurrent_mark_in_progress != 0;
 313 }
 314 
     // Address of the marking-in-progress flag, for code (e.g. generated
     // stubs) that needs to poll the raw field.
 315 inline address ShenandoahHeap::concurrent_mark_in_progress_addr() {
 316   return (address) &(ShenandoahHeap::heap()->_concurrent_mark_in_progress);
 317 }
 318 
     // True while the evacuation phase is running.
 319 inline bool ShenandoahHeap::is_evacuation_in_progress() {
 320   return _evacuation_in_progress != 0;
 321 }
 322 
 323 inline bool ShenandoahHeap::allocated_after_next_mark_start(HeapWord* addr) const {
 324   uintx index = ((uintx) addr) >> ShenandoahHeapRegion::RegionSizeShift;
 325   HeapWord* top_at_mark_start = _next_top_at_mark_starts[index];
 326   bool alloc_after_mark_start = addr >= top_at_mark_start;




 327   return alloc_after_mark_start;
 328 }
 329 
 330 inline bool ShenandoahHeap::allocated_after_complete_mark_start(HeapWord* addr) const {
 331   uintx index = ((uintx) addr) >> ShenandoahHeapRegion::RegionSizeShift;
 332   HeapWord* top_at_mark_start = _complete_top_at_mark_starts[index];
 333   bool alloc_after_mark_start = addr >= top_at_mark_start;
 334   return alloc_after_mark_start;



 335 }
 336 
 337 template<class T>
 338 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
 339   assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
 340 
 341   CMBitMap* mark_bit_map = _complete_mark_bit_map;
 342   HeapWord* top_at_mark_start = complete_top_at_mark_start(region->bottom());
 343 
 344   size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
 345   size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
 346   HeapWord* start = region->bottom() + BrooksPointer::word_size();
 347 
 348   HeapWord* limit = region->top();
 349   HeapWord* end = MIN2(top_at_mark_start + BrooksPointer::word_size(), _ordered_regions->end());
 350   HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, end);
 351 
 352   intx dist = ShenandoahMarkScanPrefetch;
 353   if (dist > 0) {
 354     // Batched scan that prefetches the oop data, anticipating the access to
 355     // either header, oop field, or forwarding pointer. Not that we cannot
 356     // touch anything in oop, while it still being prefetched to get enough
 357     // time for prefetch to work. This is why we try to scan the bitmap linearly,
 358     // disregarding the object size. However, since we know forwarding pointer
 359     // preceeds the object, we can skip over it. Once we cannot trust the bitmap,
 360     // there is no point for prefetching the oop contents, as oop->size() will
 361     // touch it prematurely.
 362 
 363     oop slots[dist];


 394     }
 395   } else {
 396     while (addr < limit) {
 397       oop obj = oop(addr);
 398       int size = obj->size();
 399       do_marked_object(mark_bit_map, cl, obj);
 400       addr += size + skip_objsize_delta;
 401       if (addr < top_at_mark_start) {
 402         addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
 403       }
 404     }
 405   }
 406 }
 407 
     // Applies the closure to one marked object, after (debug-only) sanity
     // checks that the object is non-null, well-formed, in the heap, and
     // marked in the completed-marking bitmap (the only one iterated here).
 408 template<class T>
 409 inline void ShenandoahHeap::do_marked_object(CMBitMap* bitmap, T* cl, oop obj) {
 410 #ifdef ASSERT
 411   assert(!oopDesc::is_null(obj), "sanity");
 412   assert(obj->is_oop(), "sanity");
 413   assert(is_in(obj), "sanity");
 414   assert(bitmap == _complete_mark_bit_map, "only iterate completed mark bitmap");
 415   assert(is_marked_complete(obj), "object expected to be marked");



 416 #endif
 417   cl->do_object(obj);
 418 }
 419 
 420 #endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
< prev index next >