1 /*
   2  * Copyright (c) 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  25 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  26 
  27 #include "gc_implementation/g1/concurrentMark.inline.hpp"
  28 #include "memory/threadLocalAllocBuffer.inline.hpp"
  29 #include "gc_implementation/shenandoah/brooksPointer.inline.hpp"
  30 #include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp"
  31 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
  32 #include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
  33 #include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp"
  34 #include "oops/oop.inline.hpp"
  35 #include "runtime/atomic.hpp"
  36 #include "runtime/prefetch.hpp"
  37 #include "runtime/prefetch.inline.hpp"
  38 #include "utilities/copy.hpp"
  39 
  40 template <class T>
  41 void SCMUpdateRefsClosure::do_oop_work(T* p) {
  42   T o = oopDesc::load_heap_oop(p);
  43   if (! oopDesc::is_null(o)) {
  44     oop obj = oopDesc::decode_heap_oop_not_null(o);
  45     _heap->update_oop_ref_not_null(p, obj);
  46   }
  47 }
  48 
// Both oop widths dispatch to the common templated worker above.
void SCMUpdateRefsClosure::do_oop(oop* p)       { do_oop_work(p); }
void SCMUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }
  51 
  52 /*
  53  * Marks the object. Returns true if the object has not been marked before and has
  54  * been marked by this thread. Returns false if the object has already been marked,
  55  * or if a competing thread succeeded in marking this object.
  56  */
inline bool ShenandoahHeap::mark_next(oop obj) const {
#ifdef ASSERT
  // Debug aid: if obj is not its own forwardee, dump the region containing
  // obj and the region containing the forwardee before the assert below fires.
  if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))) {
    tty->print_cr("heap region containing obj:");
    ShenandoahHeapRegion* obj_region = heap_region_containing(obj);
    obj_region->print();
    tty->print_cr("heap region containing forwardee:");
    ShenandoahHeapRegion* forward_region = heap_region_containing(oopDesc::bs()->read_barrier(obj));
    forward_region->print();
  }
#endif

  // Only the canonical (forwarded) copy of an object may be marked.
  assert(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "only mark forwarded copy of objects");
  return mark_next_no_checks(obj);
}
  72 
  73 inline bool ShenandoahHeap::mark_next_no_checks(oop obj) const {
  74   HeapWord* addr = (HeapWord*) obj;
  75   return (! allocated_after_next_mark_start(addr)) && _next_mark_bit_map->parMark(addr);
  76 }
  77 
  78 inline bool ShenandoahHeap::is_marked_next(oop obj) const {
  79   HeapWord* addr = (HeapWord*) obj;
  80   return allocated_after_next_mark_start(addr) || _next_mark_bit_map->isMarked(addr);
  81 }
  82 
  83 inline bool ShenandoahHeap::is_marked_complete(oop obj) const {
  84   HeapWord* addr = (HeapWord*) obj;
  85   return allocated_after_complete_mark_start(addr) || _complete_mark_bit_map->isMarked(addr);
  86 }
  87 
// True while references still need to be updated to to-space copies
// (flag is set/cleared elsewhere).
inline bool ShenandoahHeap::need_update_refs() const {
  return _need_update_refs;
}
  91 
// Maps an arbitrary in-heap address to the index of its region. Regions
// are power-of-two sized, so the index is the offset from the heap base
// shifted by the region size shift.
inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_shift();
#ifdef ASSERT
  // Print diagnostic details before the assert below fires.
  if (!(index < _num_regions)) {
    tty->print_cr("heap region does not contain address, heap base: "PTR_FORMAT \
                  ", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT", region_size: "SIZE_FORMAT,
                  p2i(base()),
                  p2i(_ordered_regions->get(0)->bottom()),
                  _num_regions,
                  ShenandoahHeapRegion::region_size_bytes());
  }
#endif
  assert(index < _num_regions, "heap region index must be in range");
  return index;
}
 108 
// Returns the ShenandoahHeapRegion containing addr, via index lookup into
// the ordered region array.
inline ShenandoahHeapRegion* ShenandoahHeap::heap_region_containing(const void* addr) const {
  size_t index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* result = _ordered_regions->get(index);
#ifdef ASSERT
  // Print diagnostic details before the assert below fires.
  if (!(addr >= result->bottom() && addr < result->end())) {
    tty->print_cr("heap region does not contain address, heap base: "PTR_FORMAT \
                  ", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT,
                  p2i(base()),
                  p2i(_ordered_regions->get(0)->bottom()),
                  _num_regions);
  }
#endif
  assert(addr >= result->bottom() && addr < result->end(), "address must be in found region");
  return result;
}
 124 
// If obj is in the collection set, replaces the reference at p with obj's
// forwardee (read through the Brooks pointer) and stores it back to p.
// Returns the referent after the (possible) update.
template <class T>
inline oop ShenandoahHeap::update_oop_ref_not_null(T* p, oop obj) {
  if (in_collection_set(obj)) {
    oop forw = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
    // During full GC or after a cancelled concurrent GC the object may not
    // have been evacuated yet, so the forwardee can still equal obj.
    assert(! oopDesc::unsafe_equals(forw, obj) || is_full_gc_in_progress() || cancelled_concgc(), "expect forwarded object");
    obj = forw;
    oopDesc::encode_store_heap_oop(p, obj);
  }
#ifdef ASSERT
  else {
    // Objects outside the collection set must be their own forwardee.
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "expect not forwarded");
  }
#endif
  return obj;
}
 140 
 141 template <class T>
 142 inline oop ShenandoahHeap::maybe_update_oop_ref(T* p) {
 143   T o = oopDesc::load_heap_oop(p);
 144   if (! oopDesc::is_null(o)) {
 145     oop obj = oopDesc::decode_heap_oop_not_null(o);
 146     return maybe_update_oop_ref_not_null(p, obj);
 147   } else {
 148     return NULL;
 149   }
 150 }
 151 
// CAS of a full-width oop at addr: installs n iff the current value is c.
// Returns the value observed before the exchange.
inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
  return (oop) Atomic::cmpxchg_ptr(n, addr, c);
}
 155 
// CAS of a compressed oop: encodes both operands to narrowOop, performs
// the exchange, and decodes the previously observed value.
inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
  narrowOop cmp = oopDesc::encode_heap_oop(c);
  narrowOop val = oopDesc::encode_heap_oop(n);
  return oopDesc::decode_heap_oop((narrowOop) Atomic::cmpxchg(val, addr, cmp));
}
 161 
// Attempts to update the reference at p from heap_oop (when it is in the
// collection set) to its forwardee, using CAS so that racing mutator
// stores win. Returns the forwardee on CAS success, NULL on CAS failure,
// or heap_oop unchanged when it is not in the collection set.
template <class T>
inline oop ShenandoahHeap::maybe_update_oop_ref_not_null(T* p, oop heap_oop) {

  assert((! is_in(p)) || (! in_collection_set(p))
         || is_full_gc_in_progress(),
         "never update refs in from-space, unless evacuation has been cancelled");

#ifdef ASSERT
  // Dump heap regions for diagnosis before the assert below fires.
  if (! is_in(heap_oop)) {
    print_heap_regions();
    tty->print_cr("object not in heap: "PTR_FORMAT", referenced by: "PTR_FORMAT, p2i((HeapWord*) heap_oop), p2i(p));
    assert(is_in(heap_oop), "object must be in heap");
  }
#endif
  assert(is_in(heap_oop), "only ever call this on objects in the heap");
  if (in_collection_set(heap_oop)) {
    oop forwarded_oop = ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop); // read brooks ptr
    assert(! oopDesc::unsafe_equals(forwarded_oop, heap_oop) || is_full_gc_in_progress(), "expect forwarded object");

    log_develop_trace(gc)("Updating old ref: "PTR_FORMAT" pointing to "PTR_FORMAT" to new ref: "PTR_FORMAT,
                          p2i(p), p2i(heap_oop), p2i(forwarded_oop));

    assert(forwarded_oop->is_oop(), "oop required");
    assert(is_in(forwarded_oop), "forwardee must be in heap");
    assert(oopDesc::bs()->is_safe(forwarded_oop), "forwardee must not be in collection set");
    // If this fails, another thread wrote to p before us, it will be logged in SATB and the
    // reference be updated later.
    oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);

    if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful.
      return forwarded_oop;
    } else {
      return NULL;
    }
  } else {
    // Not in the collection set: the reference is already up to date.
    assert(oopDesc::unsafe_equals(heap_oop, ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop)),
           "expect not forwarded");
    return heap_oop;
  }
}
 202 
// Reads the cancellation flag with acquire semantics, so state published
// before cancellation is visible to the reader.
inline bool ShenandoahHeap::cancelled_concgc() const {
  return OrderAccess::load_acquire((jbyte*) &_cancelled_concgc) == 1;
}
 206 
// Attempts the 0 -> 1 transition of the cancellation flag atomically;
// returns true only for the single thread whose CAS succeeds.
inline bool ShenandoahHeap::try_cancel_concgc() {
  return Atomic::cmpxchg(1, &_cancelled_concgc, 0) == 0;
}
 210 
// Resets the cancellation flag; release_store_fence publishes the reset
// with a full fence.
inline void ShenandoahHeap::clear_cancelled_concgc() {
  OrderAccess::release_store_fence(&_cancelled_concgc, 0);
}
 214 
 215 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
 216   if (UseTLAB) {
 217     HeapWord* obj = thread->gclab().allocate(size);
 218     if (obj != NULL) {
 219       return obj;
 220     }
 221     // Otherwise...
 222     return allocate_from_gclab_slow(thread, size);
 223   } else {
 224     return NULL;
 225   }
 226 }
 227 
// Copies `words` words of object p into the allocation at s, laying the
// Brooks pointer word(s) first and the payload after it, then initializes
// the new copy's forwarding pointer.
inline void ShenandoahHeap::copy_object(oop p, HeapWord* s, size_t words) {
  assert(s != NULL, "allocation of brooks pointer must not fail");
  // Payload starts past the Brooks pointer slot.
  HeapWord* copy = s + BrooksPointer::word_size();

  guarantee(copy != NULL, "allocation of copy object must not fail");
  Copy::aligned_disjoint_words((HeapWord*) p, copy, words);
  BrooksPointer::initialize(oop(copy));

  log_develop_trace(gc, compaction)("copy object from "PTR_FORMAT" to: "PTR_FORMAT, p2i((HeapWord*) p), p2i(copy));
}
 238 
// Evacuates object p into a fresh copy (GCLAB first, then shared
// allocation) and races to install the copy as p's forwardee. On winning
// the race, sets `evacuated` and returns the new copy; on losing, rolls
// back any GCLAB allocation and returns the winner's copy.
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread, bool& evacuated) {
  evacuated = false;

  // Need room for the object itself plus its Brooks forwarding pointer.
  size_t required  = BrooksPointer::word_size() + p->size();

  assert(! heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* filler = allocate_from_gclab(thread, required);
  if (filler == NULL) {
    filler = allocate_memory(required, true);
    alloc_from_gclab = false;
  }

#ifdef ASSERT
  // Checking that current Java thread does not hold Threads_lock when we get here.
  // If that ever be the case, we'd deadlock in oom_during_evacuation.
  if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
    assert(! Threads_lock->owned_by_self(), "must not hold Threads_lock here");
  }
#endif

  if (filler == NULL) {
    oom_during_evacuation();
    // If this is a Java thread, it should have waited
    // until all GC threads are done, and then we
    // return the forwardee.
    oop resolved = ShenandoahBarrierSet::resolve_oop_static(p);
    return resolved;
  }

  HeapWord* copy = filler + BrooksPointer::word_size();
  copy_object(p, filler, required - BrooksPointer::word_size());

  oop copy_val = oop(copy);
  // Race to publish the copy via the Brooks pointer; only one thread wins.
  oop result = BrooksPointer::try_update_forwardee(p, copy_val);

  oop return_val;
  if (oopDesc::unsafe_equals(result, p)) {
    // CAS succeeded: this thread's copy is the canonical to-space object.
    evacuated = true;
    return_val = copy_val;

    log_develop_trace(gc, compaction)("Copy of "PTR_FORMAT" to "PTR_FORMAT" succeeded \n",
                                      p2i((HeapWord*) p), p2i(copy));

#ifdef ASSERT
    assert(return_val->is_oop(), "expect oop");
    assert(p->klass() == return_val->klass(), err_msg("Should have the same class p: "PTR_FORMAT", copy: "PTR_FORMAT,
                                                      p2i((HeapWord*) p), p2i((HeapWord*) copy)));
#endif
  }  else {
    // Lost the race: undo our speculative GCLAB allocation and hand back
    // the copy installed by the winning thread.
    if (alloc_from_gclab) {
      thread->gclab().rollback(required);
    }
    log_develop_trace(gc, compaction)("Copy of "PTR_FORMAT" to "PTR_FORMAT" failed, use other: "PTR_FORMAT,
                                      p2i((HeapWord*) p), p2i(copy), p2i((HeapWord*) result));
    return_val = result;
  }

  return return_val;
}
 300 
// True when the entry has not yet been marked (and is not implicitly live)
// in the next marking and thus still needs processing.
inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  return ! is_marked_next(oop(entry));
}
 304 
// Direct lookup of the collection-set flag for a region index.
bool ShenandoahHeap::region_in_collection_set(size_t region_index) const {
  return _in_cset_fast_test_base[region_index];
}
 308 
// Collection-set membership test for a whole region.
bool ShenandoahHeap::in_collection_set(ShenandoahHeapRegion* r) const {
  return region_in_collection_set(r->region_number());
}
 312 
// Fast collection-set membership test for an in-heap address/oop, indexing
// the biased _in_cset_fast_test table directly by the address's region.
template <class T>
inline bool ShenandoahHeap::in_collection_set(T p) const {
  HeapWord* obj = (HeapWord*) p;
  assert(_in_cset_fast_test != NULL, "sanity");
  assert(is_in(obj), "should be in heap");

  // no need to subtract the bottom of the heap from obj,
  // _in_cset_fast_test is biased
  uintx index = ((uintx) obj) >> ShenandoahHeapRegion::region_size_shift();
  return _in_cset_fast_test[index];
}
 324 
// True while concurrent marking is running (flag is non-zero).
inline bool ShenandoahHeap::concurrent_mark_in_progress() {
  return _concurrent_mark_in_progress != 0;
}
 328 
// Raw address of the concurrent-mark flag — presumably for code that polls
// the flag directly (e.g. compiled barriers); confirm with callers.
inline address ShenandoahHeap::concurrent_mark_in_progress_addr() {
  return (address) &(ShenandoahHeap::heap()->_concurrent_mark_in_progress);
}
 332 
// True while concurrent evacuation is running (flag is non-zero).
inline bool ShenandoahHeap::is_evacuation_in_progress() {
  return _evacuation_in_progress != 0;
}
 336 
 337 inline bool ShenandoahHeap::allocated_after_next_mark_start(HeapWord* addr) const {
 338   uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_shift();
 339   HeapWord* top_at_mark_start = _next_top_at_mark_starts[index];
 340   bool alloc_after_mark_start = addr >= top_at_mark_start;
 341   return alloc_after_mark_start;
 342 }
 343 
 344 inline bool ShenandoahHeap::allocated_after_complete_mark_start(HeapWord* addr) const {
 345   uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_shift();
 346   HeapWord* top_at_mark_start = _complete_top_at_mark_starts[index];
 347   bool alloc_after_mark_start = addr >= top_at_mark_start;
 348   return alloc_after_mark_start;
 349 }
 350 
// Iterates all objects in `region` that are marked in the completed mark
// bitmap, applying the closure to each. Below the region's TAMS the bitmap
// is authoritative; above it the bitmap cannot be trusted, so traversal
// walks objects by size instead. When ShenandoahMarkScanPrefetch > 0,
// scanning is batched so object data can be prefetched ahead of use.
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");

  CMBitMap* mark_bit_map = _complete_mark_bit_map;
  HeapWord* top_at_mark_start = complete_top_at_mark_start(region->bottom());

  // Deltas to skip the forwarding pointer when advancing to the next object.
  size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
  size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
  HeapWord* start = region->bottom() + BrooksPointer::word_size();

  HeapWord* limit = region->top();
  HeapWord* end = MIN2(top_at_mark_start + BrooksPointer::word_size(), _ordered_regions->end());
  HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Not that we cannot
    // touch anything in oop, while it still being prefetched to get enough
    // time for prefetch to work. This is why we try to scan the bitmap linearly,
    // disregarding the object size. However, since we know forwarding pointer
    // preceeds the object, we can skip over it. Once we cannot trust the bitmap,
    // there is no point for prefetching the oop contents, as oop->size() will
    // touch it prematurely.

    // No variable-length arrays in standard C++, have enough slots to fit
    // the prefetch distance.
    static const int SLOT_COUNT = 256;
    guarantee(dist <= SLOT_COUNT, "adjust slot count");
    oop slots[SLOT_COUNT];

    bool aborting = false;
    int avail;
    do {
      // Fill a stride of up to `dist` marked objects, issuing prefetches.
      avail = 0;
      for (int c = 0; (c < dist) && (addr < limit); c++) {
        Prefetch::read(addr, BrooksPointer::byte_offset());
        oop obj = oop(addr);
        slots[avail++] = obj;
        if (addr < top_at_mark_start) {
          addr += skip_bitmap_delta;
          addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
        } else {
          // cannot trust mark bitmap anymore, finish the current stride,
          // and switch to accurate traversal
          addr += obj->size() + skip_objsize_delta;
          aborting = true;
        }
      }

      // Process the prefetched stride.
      for (int c = 0; c < avail; c++) {
        do_marked_object(mark_bit_map, cl, slots[c]);
      }
    } while (avail > 0 && !aborting);

    // accurate traversal
    while (addr < limit) {
      oop obj = oop(addr);
      int size = obj->size();
      do_marked_object(mark_bit_map, cl, obj);
      addr += size + skip_objsize_delta;
    }
  } else {
    // Non-prefetching path: walk by size, consulting the bitmap only while
    // below TAMS.
    while (addr < limit) {
      oop obj = oop(addr);
      int size = obj->size();
      do_marked_object(mark_bit_map, cl, obj);
      addr += size + skip_objsize_delta;
      if (addr < top_at_mark_start) {
        addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
      }
    }
  }
}
 426 
// Applies the closure to a single object; the ASSERT block sanity-checks
// that the object is a valid, in-heap oop marked in the completed bitmap.
template<class T>
inline void ShenandoahHeap::do_marked_object(CMBitMap* bitmap, T* cl, oop obj) {
#ifdef ASSERT
  assert(!oopDesc::is_null(obj), "sanity");
  assert(obj->is_oop(), "sanity");
  assert(is_in(obj), "sanity");
  assert(bitmap == _complete_mark_bit_map, "only iterate completed mark bitmap");
  assert(is_marked_complete(obj), "object expected to be marked");
#endif
  cl->do_object(obj);
}
 438 
 439 #endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP