1 /*
   2  * Copyright (c) 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  25 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
  26 
  27 #include "gc/shared/cmBitMap.inline.hpp"
  28 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
  29 #include "gc/shenandoah/brooksPointer.inline.hpp"
  30 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
  31 #include "gc/shenandoah/shenandoahHeap.hpp"
  32 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  33 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
  34 #include "oops/oop.inline.hpp"
  35 #include "runtime/atomic.hpp"
  36 #include "runtime/prefetch.hpp"
  37 #include "runtime/prefetch.inline.hpp"
  38 #include "utilities/copy.hpp"
  39 
  40 /*
  41  * Marks the object. Returns true if the object has not been marked before and has
  42  * been marked by this thread. Returns false if the object has already been marked,
  43  * or if a competing thread succeeded in marking this object.
  44  */
  45 inline bool ShenandoahHeap::mark_next(oop obj) const {
  46 #ifdef ASSERT
  47   if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))) {
  48     tty->print_cr("heap region containing obj:");
  49     ShenandoahHeapRegion* obj_region = heap_region_containing(obj);
  50     obj_region->print();
  51     tty->print_cr("heap region containing forwardee:");
  52     ShenandoahHeapRegion* forward_region = heap_region_containing(oopDesc::bs()->read_barrier(obj));
  53     forward_region->print();
  54   }
  55 #endif
  56 
  57   assert(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "only mark forwarded copy of objects");
  58   return mark_next_no_checks(obj);
  59 }
  60 
  61 inline bool ShenandoahHeap::mark_next_no_checks(oop obj) const {
  62   HeapWord* addr = (HeapWord*) obj;
  63   return (! allocated_after_next_mark_start(addr)) && _next_mark_bit_map->parMark(addr);
  64 }
  65 
  66 inline bool ShenandoahHeap::is_marked_next(oop obj) const {
  67   HeapWord* addr = (HeapWord*) obj;
  68   return allocated_after_next_mark_start(addr) || _next_mark_bit_map->isMarked(addr);
  69 }
  70 
  71 inline bool ShenandoahHeap::is_marked_complete(oop obj) const {
  72   HeapWord* addr = (HeapWord*) obj;
  73   return allocated_after_complete_mark_start(addr) || _complete_mark_bit_map->isMarked(addr);
  74 }
  75 
// Returns whether heap references still need to be updated to point to
// to-space copies (i.e. an update-refs phase is pending).
inline bool ShenandoahHeap::need_update_refs() const {
  return _need_update_refs;
}
  79 
// Computes the index of the heap region containing addr. Regions are
// contiguous and power-of-two sized, so the index is the offset from the
// bottom of the first region shifted by the region size. addr must lie
// within the heap; an out-of-heap address trips the assert below.
inline uint ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) _first_region_bottom) >> ShenandoahHeapRegion::RegionSizeShift;
#ifdef ASSERT
  // Debug aid: print heap-layout details before the range assert fires.
  if (!(index < _num_regions)) {
    tty->print_cr("heap region does not contain address, first_region_bottom: "PTR_FORMAT \
                  ", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT", region_size: "SIZE_FORMAT,
                  p2i(_first_region_bottom),
                  p2i(_ordered_regions->get(0)->bottom()),
                  _num_regions,
                  ShenandoahHeapRegion::RegionSizeBytes);
  }
#endif
  assert(index < _num_regions, "heap region index must be in range");
  return index;
}
  96 
// Returns the ShenandoahHeapRegion containing addr, by index arithmetic plus
// a lookup in the ordered region array. addr must be within the heap.
inline ShenandoahHeapRegion* ShenandoahHeap::heap_region_containing(const void* addr) const {
  uint index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* result = _ordered_regions->get(index);
#ifdef ASSERT
  // Debug aid: print heap-layout details before the containment assert fires.
  if (!(addr >= result->bottom() && addr < result->end())) {
    tty->print_cr("heap region does not contain address, first_region_bottom: "PTR_FORMAT \
                  ", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT,
                  p2i(_first_region_bottom),
                  p2i(_ordered_regions->get(0)->bottom()),
                  _num_regions);
  }
#endif
  assert(addr >= result->bottom() && addr < result->end(), "address must be in found region");
  return result;
}
 112 
// If the (non-null) object referenced through p is in the collection set,
// resolves its forwardee via the Brooks pointer and writes the forwarded
// reference back into p. Returns the (possibly updated) object. Unlike
// maybe_update_oop_ref_not_null, this uses a plain store, not a CAS.
template <class T>
inline oop ShenandoahHeap::update_oop_ref_not_null(T* p, oop obj) {
  if (in_collection_set(obj)) {
    oop forw = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
    // During concurrent evacuation every cset object must already be
    // forwarded; only a cancelled cycle (full GC) may leave it unforwarded.
    assert(! oopDesc::unsafe_equals(forw, obj) || is_full_gc_in_progress(), "expect forwarded object");
    obj = forw;
    oopDesc::encode_store_heap_oop(p, obj);
  }
#ifdef ASSERT
  else {
    // Objects outside the collection set must never be forwarded.
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "expect not forwarded");
  }
#endif
  return obj;
}
 128 
 129 template <class T>
 130 inline oop ShenandoahHeap::maybe_update_oop_ref(T* p) {
 131   T o = oopDesc::load_heap_oop(p);
 132   if (! oopDesc::is_null(o)) {
 133     oop obj = oopDesc::decode_heap_oop_not_null(o);
 134     return maybe_update_oop_ref_not_null(p, obj);
 135   } else {
 136     return NULL;
 137   }
 138 }
 139 
// CAS an uncompressed oop at addr: installs n if the current value equals c.
// Returns the value observed before the exchange (== c on success).
inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
  return (oop) Atomic::cmpxchg_ptr(n, addr, c);
}
 143 
// CAS a compressed (narrow) oop at addr: both expected and new values are
// encoded before the exchange, and the observed value is decoded on return
// (== c on success).
inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
  narrowOop cmp = oopDesc::encode_heap_oop(c);
  narrowOop val = oopDesc::encode_heap_oop(n);
  return oopDesc::decode_heap_oop((narrowOop) Atomic::cmpxchg(val, addr, cmp));
}
 149 
// If the (non-null) heap_oop referenced through p is in the collection set,
// attempts to CAS the reference at p to the forwardee. Returns the forwardee
// on a successful update, NULL if another thread raced and updated p first,
// or heap_oop itself when it is not in the collection set.
template <class T>
inline oop ShenandoahHeap::maybe_update_oop_ref_not_null(T* p, oop heap_oop) {

  // The slot p itself must not live in from-space (unless evacuation has
  // been cancelled, i.e. full GC took over); p may also be outside the heap
  // (e.g. a root in native memory).
  assert((! is_in(p)) || (! in_collection_set(p))
         || is_full_gc_in_progress(),
         "never update refs in from-space, unless evacuation has been cancelled");

#ifdef ASSERT
  // Debug aid: dump region info before the in-heap assert fires.
  if (! is_in(heap_oop)) {
    print_heap_regions();
    tty->print_cr("object not in heap: "PTR_FORMAT", referenced by: "PTR_FORMAT, p2i((HeapWord*) heap_oop), p2i(p));
    assert(is_in(heap_oop), "object must be in heap");
  }
#endif
  assert(is_in(heap_oop), "only ever call this on objects in the heap");
  if (in_collection_set(heap_oop)) {
    oop forwarded_oop = ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop); // read brooks ptr
    // A cset object must already be forwarded unless the cycle was cancelled.
    assert(! oopDesc::unsafe_equals(forwarded_oop, heap_oop) || is_full_gc_in_progress(), "expect forwarded object");

    log_develop_trace(gc)("Updating old ref: "PTR_FORMAT" pointing to "PTR_FORMAT" to new ref: "PTR_FORMAT,
                          p2i(p), p2i(heap_oop), p2i(forwarded_oop));

    assert(forwarded_oop->is_oop(), "oop required");
    assert(is_in(forwarded_oop), "forwardee must be in heap");
    assert(oopDesc::bs()->is_safe(forwarded_oop), "forwardee must not be in collection set");
    // If this fails, another thread wrote to p before us, it will be logged in SATB and the
    // reference be updated later.
    oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);

    if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful.
      return forwarded_oop;
    } else {
      // Lost the race: report NULL so the caller knows the slot changed.
      return NULL;
    }
  } else {
    // Non-cset objects must never be forwarded; nothing to update.
    assert(oopDesc::unsafe_equals(heap_oop, ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop)),
           "expect not forwarded");
    return heap_oop;
  }
}
 190 
// Returns whether the concurrent GC cycle has been cancelled. The acquire
// load pairs with the release store in set_cancelled_concgc() / the CAS in
// try_cancel_concgc().
inline bool ShenandoahHeap::cancelled_concgc() const {
  return (jbyte) OrderAccess::load_acquire((jbyte*) &_cancelled_concgc);
}
 194 
// Atomically flips the cancellation flag from false to true. Returns true
// only for the single thread whose CAS succeeded, so exactly one caller wins
// the right to perform cancellation work.
// NOTE(review): passes bool literals where the flag is accessed as jbyte,
// relying on implicit bool->jbyte conversion matching the Atomic::cmpxchg
// overload — confirm against this JDK's Atomic interface.
inline bool ShenandoahHeap::try_cancel_concgc() const {
  return Atomic::cmpxchg(true, (jbyte*) &_cancelled_concgc, false) == false;
}
 198 
// Unconditionally sets (or clears) the cancellation flag. The release store
// with fence makes prior writes visible to threads that observe the flag via
// cancelled_concgc()'s acquire load.
inline void ShenandoahHeap::set_cancelled_concgc(bool v) {
  OrderAccess::release_store_fence((jbyte*) &_cancelled_concgc, (jbyte) v);
}
 202 
 203 inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
 204   if (UseTLAB) {
 205     HeapWord* obj = thread->gclab().allocate(size);
 206     if (obj != NULL) {
 207       return obj;
 208     }
 209     // Otherwise...
 210     return allocate_from_gclab_slow(thread, size);
 211   } else {
 212     return NULL;
 213   }
 214 }
 215 
// Copies object p into the allocation at s. The first BrooksPointer::word_size()
// words of s are reserved for the forwarding pointer; the object payload is
// copied after it, and the new copy's Brooks pointer is initialized to point
// at itself. words is the payload size (excluding the Brooks pointer).
inline void ShenandoahHeap::copy_object(oop p, HeapWord* s, size_t words) {
  assert(s != NULL, "allocation of brooks pointer must not fail");
  HeapWord* copy = s + BrooksPointer::word_size();

  guarantee(copy != NULL, "allocation of copy object must not fail");
  Copy::aligned_disjoint_words((HeapWord*) p, copy, words);
  BrooksPointer::initialize(oop(copy));

  log_develop_trace(gc, compaction)("copy object from "PTR_FORMAT" to: "PTR_FORMAT, p2i((HeapWord*) p), p2i(copy));
}
 226 
// Evacuates object p into to-space on behalf of thread. Allocates space
// (GCLAB first, then shared), copies the object, and races to install the
// new copy as p's forwardee via CAS. Returns the to-space copy: either the
// one made here (CAS won) or a competitor's copy (CAS lost, our allocation
// is rolled back). On allocation failure, triggers the OOM-during-evacuation
// protocol and returns whatever p currently resolves to.
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  size_t required;

#ifdef ASSERT
  ShenandoahHeapRegion* hr = NULL;
  if (ShenandoahVerifyReadsToFromSpace) {
    // Verification mode mprotects from-space; lift protection around the
    // reads of p's header needed to size the copy.
    hr = heap_region_containing(p);
    {
      hr->memProtectionOff();
      required  = BrooksPointer::word_size() + p->size();
      hr->memProtectionOn();
    }
  } else {
    required  = BrooksPointer::word_size() + p->size();
  }
#else
    required  = BrooksPointer::word_size() + p->size();
#endif

  assert(! heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  bool alloc_from_gclab = true;
  HeapWord* filler = allocate_from_gclab(thread, required);
  if (filler == NULL) {
    // GCLAB allocation failed; fall back to shared allocation. Remember the
    // source so a lost CAS race can roll back only GCLAB allocations.
    filler = allocate_memory(required, true);
    alloc_from_gclab = false;
  }

#ifdef ASSERT
  // Checking that current Java thread does not hold Threads_lock when we get here.
  // If that ever be the case, we'd deadlock in oom_during_evacuation.
  if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
    assert(! Threads_lock->owned_by_self(), "must not hold Threads_lock here");
  }
#endif

  if (filler == NULL) {
    oom_during_evacuation();
    // If this is a Java thread, it should have waited
    // until all GC threads are done, and then we
    // return the forwardee.
    oop resolved = ShenandoahBarrierSet::resolve_oop_static(p);
    return resolved;
  }

  // Object payload starts after the reserved Brooks-pointer word(s).
  HeapWord* copy = filler + BrooksPointer::word_size();

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    // Lift from-space protection around the copy in verification mode.
    hr->memProtectionOff();
    copy_object(p, filler, required - BrooksPointer::word_size());
    hr->memProtectionOn();
  } else {
    copy_object(p, filler, required - BrooksPointer::word_size());
  }
#else
    copy_object(p, filler, required - BrooksPointer::word_size());
#endif

  // Race to install our copy as p's forwardee; result is the forwardee that
  // actually got installed (ours on success, the winner's otherwise).
  oop copy_val = oop(copy);
  oop result = BrooksPointer::try_update_forwardee(p, copy_val);

  oop return_val;
  if (oopDesc::unsafe_equals(result, p)) {
    return_val = copy_val;

    log_develop_trace(gc, compaction)("Copy of "PTR_FORMAT" to "PTR_FORMAT" succeeded \n",
                                      p2i((HeapWord*) p), p2i(copy));

#ifdef ASSERT
    assert(return_val->is_oop(), "expect oop");
    assert(p->klass() == return_val->klass(), "Should have the same class p: "PTR_FORMAT", copy: "PTR_FORMAT,
                                              p2i((HeapWord*) p), p2i((HeapWord*) copy));
#endif
  }  else {
    // Lost the race: undo our GCLAB allocation (shared allocations are left
    // as unreachable filler) and return the winning copy.
    if (alloc_from_gclab) {
      thread->gclab().rollback(required);
    }
    log_develop_trace(gc, compaction)("Copy of "PTR_FORMAT" to "PTR_FORMAT" failed, use other: "PTR_FORMAT,
                                      p2i((HeapWord*) p), p2i(copy), p2i((HeapWord*) result));
    return_val = result;
  }

  return return_val;
}
 312 
// SATB filter: an entry needs to be marked (and thus enqueued) only if it is
// not already marked in the next bitmap.
inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  return ! is_marked_next(oop(entry));
}
 316 
 317 bool ShenandoahHeap::region_in_collection_set(size_t region_index) const {
 318   return _in_cset_fast_test_base[region_index];
 319 }
 320 
 321 bool ShenandoahHeap::in_collection_set(ShenandoahHeapRegion* r) const {
 322   return region_in_collection_set(r->region_number());
 323 }
 324 
// Returns true if the address p (an oop or interior pointer in the heap)
// lies in a collection-set region. Uses the biased fast-test table indexed
// directly by (address >> region-size-shift).
template <class T>
inline bool ShenandoahHeap::in_collection_set(T p) const {
  HeapWord* obj = (HeapWord*) p;
  assert(_in_cset_fast_test != NULL, "sanity");
  assert(is_in(obj), "should be in heap");

  // no need to subtract the bottom of the heap from obj,
  // _in_cset_fast_test is biased
  uintx index = ((uintx) obj) >> ShenandoahHeapRegion::RegionSizeShift;
  return _in_cset_fast_test[index];
}
 336 
// Returns whether concurrent marking is currently running.
inline bool ShenandoahHeap::concurrent_mark_in_progress() {
  return _concurrent_mark_in_progress != 0;
}
 340 
// Returns the raw address of the concurrent-mark-in-progress flag —
// presumably so generated/barrier code can test it directly; confirm with
// the callers of this accessor.
inline address ShenandoahHeap::concurrent_mark_in_progress_addr() {
  return (address) &(ShenandoahHeap::heap()->_concurrent_mark_in_progress);
}
 344 
// Returns whether concurrent evacuation is currently running.
inline bool ShenandoahHeap::is_evacuation_in_progress() {
  return _evacuation_in_progress != 0;
}
 348 
 349 inline bool ShenandoahHeap::allocated_after_next_mark_start(HeapWord* addr) const {
 350   uintx index = ((uintx) addr) >> ShenandoahHeapRegion::RegionSizeShift;
 351   HeapWord* top_at_mark_start = _next_top_at_mark_starts[index];
 352   bool alloc_after_mark_start = addr >= top_at_mark_start;
 353   return alloc_after_mark_start;
 354 }
 355 
 356 inline bool ShenandoahHeap::allocated_after_complete_mark_start(HeapWord* addr) const {
 357   uintx index = ((uintx) addr) >> ShenandoahHeapRegion::RegionSizeShift;
 358   HeapWord* top_at_mark_start = _complete_top_at_mark_starts[index];
 359   bool alloc_after_mark_start = addr >= top_at_mark_start;
 360   return alloc_after_mark_start;
 361 }
 362 
// Iterates all objects in region that are marked in the *complete* mark
// bitmap (plus objects above top-at-mark-start, which are implicitly live),
// applying cl->do_object() to each. Below top-at-mark-start the bitmap is
// authoritative and is walked bit-by-bit; above it, objects are walked by
// size. When ShenandoahMarkScanPrefetch > 0, bitmap scanning is batched with
// prefetching of upcoming objects to hide cache misses.
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");

  CMBitMap* mark_bit_map = _complete_mark_bit_map;
  HeapWord* top_at_mark_start = complete_top_at_mark_start(region->bottom());

  // Step from one object's mark bit past its Brooks pointer word(s) to the
  // earliest address the next mark bit could occupy.
  size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
  size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
  HeapWord* start = region->bottom() + BrooksPointer::word_size();

  HeapWord* limit = region->top();
  HeapWord* end = MIN2(top_at_mark_start + BrooksPointer::word_size(), _ordered_regions->end());
  HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Not that we cannot
    // touch anything in oop, while it still being prefetched to get enough
    // time for prefetch to work. This is why we try to scan the bitmap linearly,
    // disregarding the object size. However, since we know forwarding pointer
    // preceeds the object, we can skip over it. Once we cannot trust the bitmap,
    // there is no point for prefetching the oop contents, as oop->size() will
    // touch it prematurely.

    // NOTE(review): variable-length array — a compiler extension, not
    // standard C++; relies on gcc-style VLA support.
    oop slots[dist];
    bool aborting = false;
    int avail;
    do {
      // Fill a stride of up to 'dist' objects, issuing prefetches, without
      // touching the objects themselves yet.
      avail = 0;
      for (int c = 0; (c < dist) && (addr < limit); c++) {
        Prefetch::read(addr, 1);
        oop obj = oop(addr);
        slots[avail++] = obj;
        if (addr < top_at_mark_start) {
          addr += skip_bitmap_delta;
          addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
        } else {
          // cannot trust mark bitmap anymore, finish the current stride,
          // and switch to accurate traversal
          addr += obj->size() + skip_objsize_delta;
          aborting = true;
        }
      }

      // Now process the stride; the prefetches issued above have had time
      // to complete.
      for (int c = 0; c < avail; c++) {
        do_marked_object(mark_bit_map, cl, slots[c]);
      }
    } while (avail > 0 && !aborting);

    // accurate traversal
    while (addr < limit) {
      oop obj = oop(addr);
      int size = obj->size();
      do_marked_object(mark_bit_map, cl, obj);
      addr += size + skip_objsize_delta;
    }
  } else {
    // Non-prefetching path: walk marked bits below top-at-mark-start, then
    // size-based traversal above it.
    while (addr < limit) {
      oop obj = oop(addr);
      int size = obj->size();
      do_marked_object(mark_bit_map, cl, obj);
      addr += size + skip_objsize_delta;
      if (addr < top_at_mark_start) {
        addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
      }
    }
  }
}
 433 
// Applies the closure to a single object found by marked_object_iterate(),
// after debug-only sanity checks that it is a valid, in-heap object marked
// in the completed bitmap.
template<class T>
inline void ShenandoahHeap::do_marked_object(CMBitMap* bitmap, T* cl, oop obj) {
#ifdef ASSERT
  assert(!oopDesc::is_null(obj), "sanity");
  assert(obj->is_oop(), "sanity");
  assert(is_in(obj), "sanity");
  assert(bitmap == _complete_mark_bit_map, "only iterate completed mark bitmap");
  assert(is_marked_complete(obj), "object expected to be marked");
#endif
  cl->do_object(obj);
}
 445 
 446 #endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP