/*
 * Copyright (c) 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "gc/shared/cmBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shenandoah/brooksPointer.inline.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/copy.hpp"

/*
 * Marks the object. Returns true if the object has not been marked before and has
 * been marked by this thread. Returns false if the object has already been marked,
 * or if a competing thread succeeded in marking this object.
 */
inline bool ShenandoahHeap::mark_current(oop obj) const {
#ifdef ASSERT
  if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))) {
    tty->print_cr("heap region containing obj:");
    ShenandoahHeapRegion* obj_region = heap_region_containing(obj);
    obj_region->print();
    tty->print_cr("heap region containing forwardee:");
    ShenandoahHeapRegion* forward_region = heap_region_containing(oopDesc::bs()->read_barrier(obj));
    forward_region->print();
  }
#endif

  assert(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "only mark forwarded copy of objects");
  return mark_current_no_checks(obj);
}

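/*
 * Marks the object without the forwarding-pointer sanity checks above.
 * Objects allocated after the mark cycle started (at or above TAMS) are
 * implicitly live: they are never marked in the bitmap, and false is
 * returned for them.
 */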
inline bool ShenandoahHeap::mark_current_no_checks(oop obj) const {
  HeapWord* addr = (HeapWord*) obj;
  return (! allocated_after_mark_start(addr)) && _next_mark_bit_map->parMark(addr);
}

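// An object is considered marked in the current cycle if it was allocated
// after the cycle started, or if its bit is set in the next marking bitmap.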
inline bool ShenandoahHeap::is_marked_current(oop obj) const {
  HeapWord* addr = (HeapWord*) obj;
  return allocated_after_mark_start(addr) || _next_mark_bit_map->isMarked(addr);
}

inline bool ShenandoahHeap::is_marked_current(oop obj, ShenandoahHeapRegion* r) const {
  HeapWord* addr = (HeapWord*) obj;
  return _next_mark_bit_map->isMarked(addr) || r->allocated_after_mark_start(addr);
}

inline bool ShenandoahHeap::is_marked_prev(oop obj) const {
  ShenandoahHeapRegion* r = heap_region_containing((void*) obj);
  return is_marked_prev(obj, r);
}

inline bool ShenandoahHeap::is_marked_prev(oop obj, const ShenandoahHeapRegion* r) const {
  HeapWord* addr = (HeapWord*) obj;
  return _prev_mark_bit_map->isMarked(addr) || r->allocated_after_prev_mark_start(addr);
}

inline bool ShenandoahHeap::need_update_refs() const {
  return _need_update_refs;
}

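// Computes the index of the region containing addr by shifting its offset
// from the bottom of the first region: regions are contiguous and their
// size is a power of two.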
inline uint ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) _first_region_bottom) >> ShenandoahHeapRegion::RegionSizeShift;
#ifdef ASSERT
  if (!(index < _num_regions)) {
    tty->print_cr("heap region does not contain address, first_region_bottom: "PTR_FORMAT", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT", region_size: "SIZE_FORMAT, p2i(_first_region_bottom), p2i(_ordered_regions->get(0)->bottom()), _num_regions, ShenandoahHeapRegion::RegionSizeBytes);
  }
#endif
  assert(index < _num_regions, "heap region index must be in range");
  return index;
}

inline ShenandoahHeapRegion* ShenandoahHeap::heap_region_containing(const void* addr) const {
  uint index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* result = _ordered_regions->get(index);
#ifdef ASSERT
  if (!(addr >= result->bottom() && addr < result->end())) {
    tty->print_cr("heap region does not contain address, first_region_bottom: "PTR_FORMAT", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT, p2i(_first_region_bottom), p2i(_ordered_regions->get(0)->bottom()), _num_regions);
  }
#endif
  assert(addr >= result->bottom() && addr < result->end(), "address must be in found region");
  return result;
}

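// If obj is in the collection set, resolves its forwardee, stores the
// forwardee back through p (plain, non-atomic store) and returns it;
// otherwise returns obj unchanged.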
template <class T>
inline oop ShenandoahHeap::update_oop_ref_not_null(T* p, oop obj) {
  if (in_cset_fast_test((HeapWord*) obj)) {
    oop forw = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
    assert(! oopDesc::unsafe_equals(forw, obj) || is_full_gc_in_progress(), "expect forwarded object");
    obj = forw;
    oopDesc::encode_store_heap_oop(p, obj);
  }
#ifdef ASSERT
  else {
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "expect not forwarded");
  }
#endif
  return obj;
}

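// NULL-checking wrapper: decodes the reference at p and, if it is non-NULL,
// attempts the in-place update via maybe_update_oop_ref_not_null().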
template <class T>
inline oop ShenandoahHeap::maybe_update_oop_ref(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    return maybe_update_oop_ref_not_null(p, obj);
  } else {
    return NULL;
  }
}

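// Atomically installs n at addr if addr still contains c, returning the
// witness value (c exactly when the exchange succeeded). The narrowOop
// variant encodes and decodes compressed oops around the exchange.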
inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
  return (oop) Atomic::cmpxchg_ptr(n, addr, c);
}

inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
  narrowOop cmp = oopDesc::encode_heap_oop(c);
  narrowOop val = oopDesc::encode_heap_oop(n);
  return oopDesc::decode_heap_oop((narrowOop) Atomic::cmpxchg(val, addr, cmp));
}

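// If heap_oop is in the collection set, atomically updates the reference at
// p to point to heap_oop's forwardee. Returns the forwardee on success,
// NULL if a competing thread updated p first, and heap_oop itself if no
// update was necessary.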
template <class T>
inline oop ShenandoahHeap::maybe_update_oop_ref_not_null(T* p, oop heap_oop) {
  assert((! is_in(p)) || (! heap_region_containing(p)->is_in_collection_set())
         || is_full_gc_in_progress(),
         "never update refs in from-space, unless evacuation has been cancelled");

#ifdef ASSERT
  if (! is_in(heap_oop)) {
    print_heap_regions();
    tty->print_cr("object not in heap: "PTR_FORMAT", referenced by: "PTR_FORMAT, p2i((HeapWord*) heap_oop), p2i(p));
    assert(is_in(heap_oop), "object must be in heap");
  }
#endif
  assert(is_in(heap_oop), "only ever call this on objects in the heap");
  if (in_cset_fast_test((HeapWord*) heap_oop)) {
    oop forwarded_oop = ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop); // read brooks ptr
    assert(! oopDesc::unsafe_equals(forwarded_oop, heap_oop) || is_full_gc_in_progress(), "expect forwarded object");

    log_develop_trace(gc)("Updating old ref: "PTR_FORMAT" pointing to "PTR_FORMAT" to new ref: "PTR_FORMAT, p2i(p), p2i(heap_oop), p2i(forwarded_oop));

    assert(forwarded_oop->is_oop(), "oop required");
    assert(is_in(forwarded_oop), "forwardee must be in heap");
    assert(oopDesc::bs()->is_safe(forwarded_oop), "forwardee must not be in collection set");
    // If the CAS fails, another thread wrote to p before us; that write will
    // be logged in the SATB queue and the reference will be updated later.
    oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);

    if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful.
      return forwarded_oop;
    } else {
      return NULL;
    }
  } else {
    assert(oopDesc::unsafe_equals(heap_oop, ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop)), "expect not forwarded");
    return heap_oop;
  }
}

inline bool ShenandoahHeap::cancelled_concgc() const {
  return (jbyte) OrderAccess::load_acquire((jbyte*) &_cancelled_concgc);
}

inline void ShenandoahHeap::set_cancelled_concgc(bool v) {
  OrderAccess::release_store_fence((jbyte*) &_cancelled_concgc, (jbyte) v);
}

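// Fast-path GCLAB allocation: tries the thread-local GCLAB first and falls
// back to the slow path on failure. Returns NULL when TLABs are disabled.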
inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  if (UseTLAB) {
    HeapWord* obj = thread->gclab().allocate(size);
    if (obj != NULL) {
      return obj;
    }
    // Otherwise...
    return allocate_from_gclab_slow(thread, size);
  } else {
    return NULL;
  }
}

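// Copies the object p into the space at s, leaving room for (and then
// initializing) the Brooks forwarding pointer that precedes the new copy.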
inline void ShenandoahHeap::copy_object(oop p, HeapWord* s, size_t words) {
  assert(s != NULL, "allocation of brooks pointer must not fail");
  HeapWord* copy = s + BrooksPointer::word_size();

  guarantee(copy != NULL, "allocation of copy object must not fail");
  Copy::aligned_disjoint_words((HeapWord*) p, copy, words);
  BrooksPointer::initialize(oop(copy));

  log_develop_trace(gc, compaction)("copy object from "PTR_FORMAT" to: "PTR_FORMAT, p2i((HeapWord*) p), p2i(copy));
}

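// Evacuates the object p: allocates to-space memory (preferring the
// thread's GCLAB), copies the object, then attempts to CAS the forwarding
// pointer to the new copy. If a competing thread wins the race, the
// speculative GCLAB allocation is rolled back and the winning copy is
// returned instead.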
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  size_t required;

#ifdef ASSERT
  ShenandoahHeapRegion* hr = NULL;
  if (ShenandoahVerifyReadsToFromSpace) {
    hr = heap_region_containing(p);
    {
      hr->memProtectionOff();
      required = BrooksPointer::word_size() + p->size();
      hr->memProtectionOn();
    }
  } else {
    required = BrooksPointer::word_size() + p->size();
  }
#else
  required = BrooksPointer::word_size() + p->size();
#endif

  assert(! heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* filler = allocate_from_gclab(thread, required);
  if (filler == NULL) {
    filler = allocate_memory(required, true);
    alloc_from_gclab = false;
  }

  if (filler == NULL) {
    oom_during_evacuation();
    // If this is a Java thread, it should have waited
    // until all GC threads are done, and then we
    // return the forwardee.
    oop resolved = ShenandoahBarrierSet::resolve_oop_static(p);
    return resolved;
  }

  HeapWord* copy = filler + BrooksPointer::word_size();

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    hr->memProtectionOff();
    copy_object(p, filler, required - BrooksPointer::word_size());
    hr->memProtectionOn();
  } else {
    copy_object(p, filler, required - BrooksPointer::word_size());
  }
#else
  copy_object(p, filler, required - BrooksPointer::word_size());
#endif

  oop copy_val = oop(copy);
  oop result = BrooksPointer::try_update_forwardee(p, copy_val);

  oop return_val;
  if (oopDesc::unsafe_equals(result, p)) {
    return_val = copy_val;

    log_develop_trace(gc, compaction)("Copy of "PTR_FORMAT" to "PTR_FORMAT" succeeded", p2i((HeapWord*) p), p2i(copy));

#ifdef ASSERT
    assert(return_val->is_oop(), "expect oop");
    assert(p->klass() == return_val->klass(), "Should have the same class p: "PTR_FORMAT", copy: "PTR_FORMAT, p2i((HeapWord*) p), p2i((HeapWord*) copy));
#endif
  } else {
    if (alloc_from_gclab) {
      thread->gclab().rollback(required);
    }
    log_develop_trace(gc, compaction)("Copy of "PTR_FORMAT" to "PTR_FORMAT" failed, use other: "PTR_FORMAT, p2i((HeapWord*) p), p2i(copy), p2i((HeapWord*) result));
    return_val = result;
  }

  return return_val;
}

inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  return ! is_marked_current(oop(entry));
}

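// Fast collection-set membership test: indexes a biased per-region table by
// the address shifted by the region size, avoiding a full region lookup.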
inline bool ShenandoahHeap::in_cset_fast_test(HeapWord* obj) {
  assert(_in_cset_fast_test != NULL, "sanity");
  assert(is_in(obj), "should be in heap");

  // no need to subtract the bottom of the heap from obj,
  // _in_cset_fast_test is biased
  uintx index = ((uintx) obj) >> ShenandoahHeapRegion::RegionSizeShift;
  bool ret = _in_cset_fast_test[index];

  // let's make sure the result is consistent with what the slower
  // test returns
  assert( ret || !is_in_collection_set(obj), "sanity");
  assert(!ret ||  is_in_collection_set(obj), "sanity");
  return ret;
}

inline bool ShenandoahHeap::concurrent_mark_in_progress() {
  return _concurrent_mark_in_progress != 0;
}

inline address ShenandoahHeap::concurrent_mark_in_progress_addr() {
  return (address) &(ShenandoahHeap::heap()->_concurrent_mark_in_progress);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() {
  return _evacuation_in_progress != 0;
}

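// Returns true if addr lies at or above the top-at-mark-start (TAMS) of its
// region, i.e. the object was allocated after the current mark cycle began.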
inline bool ShenandoahHeap::allocated_after_mark_start(HeapWord* addr) const {
  uintx index = ((uintx) addr) >> ShenandoahHeapRegion::RegionSizeShift;
  HeapWord* top_at_mark_start = _top_at_mark_starts[index];
  bool alloc_after_mark_start = addr >= top_at_mark_start;
#ifdef ASSERT
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  assert(alloc_after_mark_start == r->allocated_after_mark_start(addr), "sanity");
#endif
  return alloc_after_mark_start;
}

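// The two wrappers below iterate over the objects marked in the previous
// (completed) and next (in-progress) marking bitmaps, bounded by the
// corresponding top-at-mark-start of the region.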
template<class T>
inline void ShenandoahHeap::marked_prev_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, _prev_mark_bit_map, region->top_at_prev_mark_start());
}

template<class T>
inline void ShenandoahHeap::marked_next_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, _next_mark_bit_map, region->top_at_mark_start());
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, CMBitMap* mark_bit_map, HeapWord* top_at_mark_start) {
  assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
  size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
  size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
  HeapWord* start = region->bottom() + BrooksPointer::word_size();

  HeapWord* limit = region->top();
  HeapWord* end = MIN2(top_at_mark_start + BrooksPointer::word_size(), _ordered_regions->end());
  HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in the oop while it is still being prefetched, to give
    // the prefetch enough time to work. This is why we try to scan the
    // bitmap linearly, disregarding the object size. However, since we know
    // the forwarding pointer precedes the object, we can skip over it. Once
    // we cannot trust the bitmap, there is no point in prefetching the oop
    // contents, as oop->size() will touch it prematurely.

    oop slots[dist];
    bool aborting = false;
    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (addr < limit); c++) {
        Prefetch::read(addr, 1);
        oop obj = oop(addr);
        slots[avail++] = obj;
        if (addr < top_at_mark_start) {
          addr += skip_bitmap_delta;
          addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
        } else {
          // cannot trust mark bitmap anymore, finish the current stride,
          // and switch to accurate traversal
          addr += obj->size() + skip_objsize_delta;
          aborting = true;
        }
      }

      for (int c = 0; c < avail; c++) {
        do_marked_object(mark_bit_map, cl, slots[c]);
      }
    } while (avail > 0 && !aborting);

    // accurate traversal
    while (addr < limit) {
      oop obj = oop(addr);
      int size = obj->size();
      do_marked_object(mark_bit_map, cl, obj);
      addr += size + skip_objsize_delta;
    }
  } else {
    while (addr < limit) {
      oop obj = oop(addr);
      int size = obj->size();
      do_marked_object(mark_bit_map, cl, obj);
      addr += size + skip_objsize_delta;
      if (addr < top_at_mark_start) {
        addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
      }
    }
  }
}

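// Applies the closure to a single marked object, verifying in debug builds
// that the object is a valid in-heap oop and is indeed marked relative to
// the given bitmap.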
template<class T>
inline void ShenandoahHeap::do_marked_object(CMBitMap* bitmap, T* cl, oop obj) {
#ifdef ASSERT
  assert(!oopDesc::is_null(obj), "sanity");
  assert(obj->is_oop(), "sanity");
  assert(is_in(obj), "sanity");
  if (bitmap == _prev_mark_bit_map) {
    assert(is_marked_prev(obj), "object expected to be marked");
  } else {
    assert(is_marked_current(obj), "object expected to be marked");
  }
#endif
  cl->do_object(obj);
}

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP