/*
 * Copyright (c) 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP

#include "gc/shared/cmBitMap.inline.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "gc/shenandoah/brooksPointer.inline.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/copy.hpp"

/*
 * Marks the object. Returns true if the object has not been marked before and
 * was marked by this thread. Returns false if the object was already marked,
 * or if a competing thread succeeded in marking it first.
 */
inline bool ShenandoahHeap::mark_current(oop obj) const {
#ifdef ASSERT
  if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))) {
    tty->print_cr("heap region containing obj:");
    ShenandoahHeapRegion* obj_region = heap_region_containing(obj);
    obj_region->print();
    tty->print_cr("heap region containing forwardee:");
    ShenandoahHeapRegion* forward_region = heap_region_containing(oopDesc::bs()->read_barrier(obj));
    forward_region->print();
  }
#endif

  assert(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "only mark the forwarded copy of objects");
  return mark_current_no_checks(obj);
}

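// Marks the object without the forwardee sanity check above. Objects allocated
// after the start of the current marking cycle (above TAMS) are implicitly
// marked, so they never get a bitmap bit and this returns false for them.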
inline bool ShenandoahHeap::mark_current_no_checks(oop obj) const {
  HeapWord* addr = (HeapWord*) obj;
  return (! allocated_after_mark_start(addr)) && _next_mark_bit_map->parMark(addr);
}

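// Returns true if the object is marked in the current (next) marking cycle:
// either explicitly in the next mark bitmap, or implicitly by having been
// allocated after the start of marking.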
inline bool ShenandoahHeap::is_marked_current(oop obj) const {
  HeapWord* addr = (HeapWord*) obj;
  return allocated_after_mark_start(addr) || _next_mark_bit_map->isMarked(addr);
}

inline bool ShenandoahHeap::is_marked_current(oop obj, ShenandoahHeapRegion* r) const {
  HeapWord* addr = (HeapWord*) obj;
  return _next_mark_bit_map->isMarked(addr) || r->allocated_after_mark_start(addr);
}

inline bool ShenandoahHeap::is_marked_prev(oop obj) const {
  ShenandoahHeapRegion* r = heap_region_containing((void*) obj);
  return is_marked_prev(obj, r);
}

inline bool ShenandoahHeap::is_marked_prev(oop obj, const ShenandoahHeapRegion* r) const {
  HeapWord* addr = (HeapWord*) obj;
  return _prev_mark_bit_map->isMarked(addr) || r->allocated_after_prev_mark_start(addr);
}

inline bool ShenandoahHeap::need_update_refs() const {
  return _need_update_refs;
}

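// Computes the index of the region covering addr. Regions are contiguous and
// power-of-two sized, so the index falls out of a subtraction of the heap
// base and a shift by RegionSizeShift.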
inline uint ShenandoahHeap::heap_region_index_containing(const void* addr) const {
  uintptr_t region_start = ((uintptr_t) addr);
  uintptr_t index = (region_start - (uintptr_t) _first_region_bottom) >> ShenandoahHeapRegion::RegionSizeShift;
#ifdef ASSERT
  if (!(index < _num_regions)) {
    tty->print_cr("heap region does not contain address, first_region_bottom: " PTR_FORMAT ", real bottom of first region: " PTR_FORMAT ", num_regions: " SIZE_FORMAT ", region_size: " SIZE_FORMAT, p2i(_first_region_bottom), p2i(_ordered_regions->get(0)->bottom()), _num_regions, ShenandoahHeapRegion::RegionSizeBytes);
  }
#endif
  assert(index < _num_regions, "heap region index must be in range");
  return index;
}

inline ShenandoahHeapRegion* ShenandoahHeap::heap_region_containing(const void* addr) const {
  uint index = heap_region_index_containing(addr);
  ShenandoahHeapRegion* result = _ordered_regions->get(index);
#ifdef ASSERT
  if (!(addr >= result->bottom() && addr < result->end())) {
    tty->print_cr("heap region does not contain address, first_region_bottom: " PTR_FORMAT ", real bottom of first region: " PTR_FORMAT ", num_regions: " SIZE_FORMAT, p2i(_first_region_bottom), p2i(_ordered_regions->get(0)->bottom()), _num_regions);
  }
#endif
  assert(addr >= result->bottom() && addr < result->end(), "address must be in found region");
  return result;
}

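// If obj is in the collection set, stores its forwardee back into the slot p
// (plain, non-atomic store) and returns the forwardee; otherwise returns obj
// unchanged. Compare maybe_update_oop_ref_not_null below, which uses a CAS.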
template <class T>
inline oop ShenandoahHeap::update_oop_ref_not_null(T* p, oop obj) {
  if (in_cset_fast_test((HeapWord*) obj)) {
    oop forw = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
    assert(! oopDesc::unsafe_equals(forw, obj) || is_full_gc_in_progress(), "expect forwarded object");
    obj = forw;
    oopDesc::encode_store_heap_oop(p, obj);
  }
#ifdef ASSERT
  else {
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "expect not forwarded");
  }
#endif
  return obj;
}

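// Null-checking wrapper: decodes the reference at p and forwards non-null
// values to maybe_update_oop_ref_not_null; returns NULL for an empty slot.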
template <class T>
inline oop ShenandoahHeap::maybe_update_oop_ref(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    return maybe_update_oop_ref_not_null(p, obj);
  } else {
    return NULL;
  }
}

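// Atomically installs n at addr if the slot still holds c; returns the value
// witnessed by the CAS. The narrowOop overload encodes and decodes compressed
// oops around the exchange.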
inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, oop* addr, oop c) {
  return (oop) Atomic::cmpxchg_ptr(n, addr, c);
}

inline oop ShenandoahHeap::atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c) {
  narrowOop cmp = oopDesc::encode_heap_oop(c);
  narrowOop val = oopDesc::encode_heap_oop(n);
  return oopDesc::decode_heap_oop((narrowOop) Atomic::cmpxchg(val, addr, cmp));
}

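// If heap_oop is in the collection set, attempts to CAS the slot p from
// heap_oop to its forwardee. Returns the forwardee on success, NULL when a
// competing thread got to p first (that store is covered by the SATB
// protocol), and heap_oop itself when no update is needed.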
template <class T>
inline oop ShenandoahHeap::maybe_update_oop_ref_not_null(T* p, oop heap_oop) {
  assert((! is_in(p)) || (! heap_region_containing(p)->is_in_collection_set())
         || is_full_gc_in_progress(),
         "never update refs in from-space, unless evacuation has been cancelled");

#ifdef ASSERT
  if (! is_in(heap_oop)) {
    print_heap_regions();
    tty->print_cr("object not in heap: " PTR_FORMAT ", referenced by: " PTR_FORMAT, p2i((HeapWord*) heap_oop), p2i(p));
    assert(is_in(heap_oop), "object must be in heap");
  }
#endif
  assert(is_in(heap_oop), "only ever call this on objects in the heap");
  if (in_cset_fast_test((HeapWord*) heap_oop)) {
    oop forwarded_oop = ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop); // read brooks ptr
    assert(! oopDesc::unsafe_equals(forwarded_oop, heap_oop) || is_full_gc_in_progress(), "expect forwarded object");

    log_develop_trace(gc)("Updating old ref: " PTR_FORMAT " pointing to " PTR_FORMAT " to new ref: " PTR_FORMAT, p2i(p), p2i(heap_oop), p2i(forwarded_oop));

    assert(forwarded_oop->is_oop(), "oop required");
    assert(is_in(forwarded_oop), "forwardee must be in heap");
    assert(oopDesc::bs()->is_safe(forwarded_oop), "forwardee must not be in collection set");
    // If this fails, another thread wrote to p before us; that write is logged
    // by the SATB protocol, and the reference will be updated later.
    oop result = atomic_compare_exchange_oop(forwarded_oop, p, heap_oop);

    if (oopDesc::unsafe_equals(result, heap_oop)) { // CAS successful.
      return forwarded_oop;
    } else {
      return NULL;
    }
  } else {
    assert(oopDesc::unsafe_equals(heap_oop, ShenandoahBarrierSet::resolve_oop_static_not_null(heap_oop)), "expect not forwarded");
    return heap_oop;
  }
}

inline bool ShenandoahHeap::cancelled_concgc() const {
  return _cancelled_concgc;
}

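// Fast-path GCLAB allocation: tries a bump allocation in the thread's GCLAB
// and falls back to allocate_from_gclab_slow on failure. Returns NULL when
// TLABs are disabled, so the caller uses shared allocation instead.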
inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
  if (UseTLAB) {
    HeapWord* obj = thread->gclab().allocate(size);
    if (obj != NULL) {
      return obj;
    }
    // Otherwise...
    return allocate_from_gclab_slow(thread, size);
  } else {
    return NULL;
  }
}

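// Copies the object p into the allocation at s, leaving room for the Brooks
// forwarding pointer in front of the new copy, and initializes that pointer
// for the copy.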
inline void ShenandoahHeap::copy_object(oop p, HeapWord* s, size_t words) {
  assert(s != NULL, "allocation of brooks pointer must not fail");
  HeapWord* copy = s + BrooksPointer::word_size();

  guarantee(copy != NULL, "allocation of copy object must not fail");
  Copy::aligned_disjoint_words((HeapWord*) p, copy, words);
  BrooksPointer::initialize(oop(copy));

  log_develop_trace(gc, compaction)("copy object from " PTR_FORMAT " to: " PTR_FORMAT, p2i((HeapWord*) p), p2i(copy));
}

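// Evacuates p into to-space and returns the canonical copy. The copy is made
// speculatively into a GCLAB (or shared) allocation; a CAS on the forwarding
// pointer then decides the winning copy. Losers roll back their GCLAB
// allocation and return the winner's copy instead.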
inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  size_t required;

#ifdef ASSERT
  ShenandoahHeapRegion* hr = NULL;
  if (ShenandoahVerifyReadsToFromSpace) {
    hr = heap_region_containing(p);
    {
      hr->memProtectionOff();
      required = BrooksPointer::word_size() + p->size();
      hr->memProtectionOn();
    }
  } else {
    required = BrooksPointer::word_size() + p->size();
  }
#else
  required = BrooksPointer::word_size() + p->size();
#endif

  assert(! heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  // Don't even attempt to evacuate anything if evacuation has been cancelled.
  if (_cancelled_concgc) {
    return ShenandoahBarrierSet::resolve_oop_static(p);
  }

  bool alloc_from_gclab = true;
  HeapWord* filler = allocate_from_gclab(thread, required);
  if (filler == NULL) {
    filler = allocate_memory(required, true);
    alloc_from_gclab = false;
  }

  if (filler == NULL) {
    oom_during_evacuation();
    // If this is a Java thread, it should have waited
    // until all GC threads are done, and then we
    // return the forwardee.
    oop resolved = ShenandoahBarrierSet::resolve_oop_static(p);
    return resolved;
  }

  HeapWord* copy = filler + BrooksPointer::word_size();

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    hr->memProtectionOff();
    copy_object(p, filler, required - BrooksPointer::word_size());
    hr->memProtectionOn();
  } else {
    copy_object(p, filler, required - BrooksPointer::word_size());
  }
#else
  copy_object(p, filler, required - BrooksPointer::word_size());
#endif

  oop copy_val = oop(copy);
  oop result = BrooksPointer::try_update_forwardee(p, copy_val);

  oop return_val;
  if (oopDesc::unsafe_equals(result, p)) {
    return_val = copy_val;

    log_develop_trace(gc, compaction)("Copy of " PTR_FORMAT " to " PTR_FORMAT " succeeded", p2i((HeapWord*) p), p2i(copy));

#ifdef ASSERT
    assert(return_val->is_oop(), "expect oop");
    assert(p->klass() == return_val->klass(), "Should have the same class p: " PTR_FORMAT ", copy: " PTR_FORMAT, p2i((HeapWord*) p), p2i((HeapWord*) copy));
#endif
  } else {
    if (alloc_from_gclab) {
      thread->gclab().rollback(required);
    }
    log_develop_trace(gc, compaction)("Copy of " PTR_FORMAT " to " PTR_FORMAT " failed, use other: " PTR_FORMAT, p2i((HeapWord*) p), p2i(copy), p2i((HeapWord*) result));
    return_val = result;
  }

  return return_val;
}

inline bool ShenandoahHeap::requires_marking(const void* entry) const {
  return ! is_marked_current(oop(entry));
}

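// Fast collection-set membership test: one flag per region, indexed by the
// address shifted by the region size. The table is biased by the heap base,
// so the raw address can be used without subtracting the bottom of the heap.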
inline bool ShenandoahHeap::in_cset_fast_test(HeapWord* obj) {
  assert(_in_cset_fast_test != NULL, "sanity");
  assert(is_in(obj), "should be in heap");

  // No need to subtract the bottom of the heap from obj;
  // _in_cset_fast_test is biased.
  uintx index = ((uintx) obj) >> ShenandoahHeapRegion::RegionSizeShift;
  bool ret = _in_cset_fast_test[index];

  // Let's make sure the result is consistent with what the slower
  // test returns.
  assert( ret || !is_in_collection_set(obj), "sanity");
  assert(!ret ||  is_in_collection_set(obj), "sanity");
  return ret;
}

inline bool ShenandoahHeap::concurrent_mark_in_progress() {
  return _concurrent_mark_in_progress != 0;
}

inline address ShenandoahHeap::concurrent_mark_in_progress_addr() {
  return (address) &(ShenandoahHeap::heap()->_concurrent_mark_in_progress);
}

inline bool ShenandoahHeap::is_evacuation_in_progress() {
  return _evacuation_in_progress != 0;
}

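// Returns true if addr lies at or above its region's top-at-mark-start (TAMS),
// i.e. the object was allocated during the current marking cycle and is
// treated as implicitly live.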
inline bool ShenandoahHeap::allocated_after_mark_start(HeapWord* addr) const {
  uintx index = ((uintx) addr) >> ShenandoahHeapRegion::RegionSizeShift;
  HeapWord* top_at_mark_start = _top_at_mark_starts[index];
  bool alloc_after_mark_start = addr >= top_at_mark_start;
#ifdef ASSERT
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  assert(alloc_after_mark_start == r->allocated_after_mark_start(addr), "sanity");
#endif
  return alloc_after_mark_start;
}

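// Iterate over all objects in the region that are marked in the given bitmap,
// applying cl to each. Objects above top-at-mark-start carry no bitmap bits
// and are walked linearly by object size instead.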
template<class T>
inline void ShenandoahHeap::marked_prev_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, _prev_mark_bit_map, region->top_at_prev_mark_start());
}

template<class T>
inline void ShenandoahHeap::marked_next_object_iterate(ShenandoahHeapRegion* region, T* cl) {
  marked_object_iterate(region, cl, _next_mark_bit_map, region->top_at_mark_start());
}

template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, CMBitMap* mark_bit_map, HeapWord* top_at_mark_start) {
  assert(BrooksPointer::word_offset() < 0, "skip_delta calculation below assumes the forwarding ptr is before obj");
  size_t skip_bitmap_delta = BrooksPointer::word_size() + 1;
  size_t skip_objsize_delta = BrooksPointer::word_size() /* + actual obj.size() below */;
  HeapWord* start = region->bottom() + BrooksPointer::word_size();

  HeapWord* limit = region->top();
  HeapWord* end = MIN2(top_at_mark_start + BrooksPointer::word_size(), _ordered_regions->end());
  HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, end);

  intx dist = ShenandoahMarkScanPrefetch;
  if (dist > 0) {
    // Batched scan that prefetches the oop data, anticipating the access to
    // either header, oop field, or forwarding pointer. Note that we cannot
    // touch anything in the oop while it is still being prefetched, to give
    // the prefetch enough time to work. This is why we try to scan the bitmap
    // linearly, disregarding the object size. However, since we know the
    // forwarding pointer precedes the object, we can skip over it. Once we
    // cannot trust the bitmap, there is no point in prefetching the oop
    // contents, as oop->size() will touch it prematurely.

    oop slots[dist];
    bool aborting = false;
    int avail;
    do {
      avail = 0;
      for (int c = 0; (c < dist) && (addr < limit); c++) {
        Prefetch::read(addr, 1);
        oop obj = oop(addr);
        slots[avail++] = obj;
        if (addr < top_at_mark_start) {
          addr += skip_bitmap_delta;
          addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
        } else {
          // Cannot trust the mark bitmap anymore: finish the current stride,
          // and switch to accurate traversal.
          addr += obj->size() + skip_objsize_delta;
          aborting = true;
        }
      }

      for (int c = 0; c < avail; c++) {
        do_marked_object(mark_bit_map, cl, slots[c]);
      }
    } while (avail > 0 && !aborting);

    // Accurate traversal.
    while (addr < limit) {
      oop obj = oop(addr);
      int size = obj->size();
      do_marked_object(mark_bit_map, cl, obj);
      addr += size + skip_objsize_delta;
    }
  } else {
    while (addr < limit) {
      oop obj = oop(addr);
      int size = obj->size();
      do_marked_object(mark_bit_map, cl, obj);
      addr += size + skip_objsize_delta;
      if (addr < top_at_mark_start) {
        addr = mark_bit_map->getNextMarkedWordAddress(addr, end);
      }
    }
  }
}

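// Applies the closure to one marked object, sanity-checking in debug builds
// that the object really is marked in the cycle the given bitmap belongs to.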
template<class T>
inline void ShenandoahHeap::do_marked_object(CMBitMap* bitmap, T* cl, oop obj) {
#ifdef ASSERT
  assert(!oopDesc::is_null(obj), "sanity");
  assert(obj->is_oop(), "sanity");
  assert(is_in(obj), "sanity");
  if (bitmap == _prev_mark_bit_map) {
    assert(is_marked_prev(obj), "object expected to be marked");
  } else {
    assert(is_marked_current(obj), "object expected to be marked");
  }
#endif
  cl->do_object(obj);
}

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP