/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"
SCMUpdateRefsClosure::SCMUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}

#ifdef ASSERT
template <class T>
void AssertToSpaceClosure::do_oop_nv(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
           "need to-space object here obj: " PTR_FORMAT ", rb(obj): " PTR_FORMAT ", p: " PTR_FORMAT,
           p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
  }
}

void AssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void AssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
#endif

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  HeapWord* cur = NULL;
  for (cur = start; cur < end; cur++) {
    tty->print_cr(PTR_FORMAT " : " PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  }
}

class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;
  const size_t _bitmap_size;
  const size_t _page_size;
  char* _bitmap0_base;
  char* _bitmap1_base;
public:
  ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
                         char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _regions(regions),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap0_base(bitmap0_base),
    _bitmap1_base(bitmap1_base) {
    _regions->clear_current_index();
  }

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(r->bottom()), p2i(r->end()));
      os::pretouch_memory(r->bottom(), r->end(), _page_size);

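      // CMBitMap::heap_map_factor() is the number of heap bytes covered by one
      // bitmap byte, so each region maps to a contiguous bitmap slice of
      // region_size_bytes / heap_map_factor bytes. Touching a region and its
      // bitmap slices from the same worker keeps both on the same NUMA node.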
      size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / CMBitMap::heap_map_factor();
      size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / CMBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
      os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
      os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);

      r = _regions->claim_next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  BrooksPointer::initial_checks();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 Arguments::conservative_max_heap_alignment());
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);

  _num_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
  _max_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
  _initialSize = _num_regions * ShenandoahHeapRegion::region_size_bytes();
  size_t regionSizeWords = ShenandoahHeapRegion::region_size_bytes() / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahCollectionSet(_max_regions);
  _free_regions = new ShenandoahFreeSet(_max_regions);

  // Initialize fast collection set test structure.
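  // The fast-test array and the top-at-mark-start arrays below are biased:
  // instead of subtracting the heap base from an address on every lookup, we
  // subtract (heap_base >> region_size_shift) from the array base once, so a
  // query is simply array[(uintx) addr >> region_size_shift].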
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
                   NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length, mtGC);
  _in_cset_fast_test = _in_cset_fast_test_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_shift());

  _next_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_shift());

  _complete_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_shift());

  size_t i = 0;
  for (i = 0; i < _num_regions; i++) {
    _in_cset_fast_test_base[i] = false; // Not in cset
    HeapWord* bottom = (HeapWord*) pgc_rs.base() + regionSizeWords * i;
    _complete_top_at_mark_starts_base[i] = bottom;
    _next_top_at_mark_starts_base[i] = bottom;
  }

  {
    ShenandoahHeapLock lock(this);
    for (i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* current = new ShenandoahHeapRegion(this, (HeapWord*) pgc_rs.base() +
                                                               regionSizeWords * i, regionSizeWords, i);
      _free_regions->add_region(current);
      _ordered_regions->add_region(current);
    }
  }
  assert(((size_t) _ordered_regions->active_regions()) == _num_regions, "");
  _first_region = _ordered_regions->get(0);
  _first_region_bottom = _first_region->bottom();
  assert((((size_t) _first_region_bottom) &
          (ShenandoahHeapRegion::region_size_bytes() - 1)) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(_first_region_bottom));

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    log_trace(gc, region)("All Regions");
    _ordered_regions->print(out);
    log_trace(gc, region)("Free Regions");
    _free_regions->print(out);
  }
  // The call below uses facilities (the SATB queue machinery) that currently
  // live in G1 but probably belong in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /* G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for the complete and next mark bitmaps.
  _bitmap_size = CMBitMap::compute_size(heap_rs.size());
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap0(_bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  ReservedSpace bitmap1(_bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);

  if (ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix)) {
    ReservedSpace verify_bitmap(_bitmap_size, page_size);
    os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
                              "couldn't allocate verification bitmap");
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
  }

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");
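    // ShenandoahAlwaysPreTouch pretouches with the worker gang below, so the
    // generic AlwaysPreTouch path is expected to have been disabled during
    // argument processing.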

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it from the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _ordered_regions->count(), page_size);
    ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  _mark_bit_map0.initialize(_heap_region, bitmap_region0);
  _complete_mark_bit_map = &_mark_bit_map0;

  _mark_bit_map1.initialize(_heap_region, bitmap_region1);
  _next_mark_bit_map = &_mark_bit_map1;

  _connection_matrix = new ShenandoahConnectionMatrix(_max_regions);
  _partial_gc = new ShenandoahPartialGC(this, _max_regions);

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  return JNI_OK;
}

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _update_refs_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _connection_matrix(NULL),
  _cancelled_concgc(false),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _heap_lock(0),
#ifdef ASSERT
  _heap_lock_owner(NULL),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
{
  log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: " UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */ true,
                            /* are_ConcurrentGC_threads */ false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }
}

class ResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->next_top_at_mark_start(region->bottom());
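      // Marks can only exist below the region's top-at-mark-start (TAMS):
      // objects allocated above TAMS during the cycle are implicitly live and
      // never get bits set, so that part of the bitmap is already clear.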
      if (top > bottom) {
        heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
  ResetNextBitmapTask task = ResetNextBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

class ResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
  ResetCompleteBitmapTask task = ResetCompleteBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

bool ShenandoahHeap::is_next_bitmap_clear() {
  HeapWord* start = _ordered_regions->bottom();
  HeapWord* end = _ordered_regions->end();
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity() / K, used() / K);
  st->print(" [" PTR_FORMAT ", " PTR_FORMAT ") ",
            p2i(reserved_region().start()),
            p2i(reserved_region().end()));
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::region_size_bytes() / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (cancelled_concgc()) {
    st->print("cancelled ");
  }
  st->print("\n");

  // Adapted from VirtualSpace::print_on(), which is non-PRODUCT only
  st->print("Virtual space:");
  if (_storage.special()) st->print(" (pinned in memory)");
  st->cr();
  st->print_cr(" - committed: " SIZE_FORMAT, _storage.committed_size());
  st->print_cr(" - reserved:  " SIZE_FORMAT, _storage.reserved_size());
  st->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low()), p2i(_storage.high()));
  st->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low_boundary()), p2i(_storage.high_boundary()));

  if (Verbose) {
    print_heap_regions(st);
  }
}

class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    // This is a very tricky point in VM lifetime. We cannot easily call Threads::threads_do
    // here, because some system threads (VMThread, WatcherThread, etc) are not yet available.
    // Their initialization should be handled separately. If we miss some threads here,
    // then any other TLAB-related activity would fail with asserts.

    InitGCLABClosure init_gclabs;
    {
      MutexLocker ml(Threads_lock);
      for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
        init_gclabs.do_thread(thread);
      }
    }
    gc_threads_do(&init_gclabs);

    // GCLABs cannot be initialized this early during VM startup, because they cannot
    // determine their max_size yet. Instead, we let the WorkGang initialize the GCLAB
    // when a new worker is created.
    _workers->set_initialize_gclab();
  }

  _scm->initialize(_max_workers);

  ref_processing_init();
}

class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
  size_t sum;
public:
  CalculateUsedRegionClosure() {
    sum = 0;
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    sum = sum + r->used();
    return false;
  }

  size_t getResult() { return sum; }
};

size_t ShenandoahHeap::calculateUsed() {
  CalculateUsedRegionClosure cl;
  heap_region_iterate(&cl);
  return cl.getResult();
}

void ShenandoahHeap::verify_heap_size_consistency() {
  assert(calculateUsed() == used(),
         "heap used size must be consistent: heap-used: " SIZE_FORMAT ", regions-used: " SIZE_FORMAT,
         used(), calculateUsed());
}

size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used += bytes;
}

void ShenandoahHeap::set_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used = bytes;
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  assert(_used >= bytes, "never decrease heap size by more than we've left");
  _used -= bytes;
}

size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::min_capacity() const {
  return _initialSize;
}

VirtualSpace* ShenandoahHeap::storage() const {
  return (VirtualSpace*) &_storage;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* first_region_bottom = _first_region->bottom();
  HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::region_size_bytes() / HeapWordSize) * _num_regions;
  return p >= _first_region_bottom && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
  return allocate_new_tlab(word_size, false);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
  return allocate_new_tlab(word_size, true);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory(word_size, evacuating);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in dirty region");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size " SIZE_FORMAT " at addr " PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
  ShenandoahHeapLock heap_lock(this);

  HeapWord* result = allocate_memory_under_lock(word_size);
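  // Round the request up to a whole number of regions. Growing by at least
  // that many regions ensures the retry below cannot fail merely for lack of
  // committed space, though contiguity for humongous requests may still be
  // an issue.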
  size_t grow_by = (word_size * HeapWordSize + ShenandoahHeapRegion::region_size_bytes() - 1) / ShenandoahHeapRegion::region_size_bytes();

  while (result == NULL && _num_regions + grow_by <= _max_regions) {
    grow_heap_by(grow_by);
    result = allocate_memory_under_lock(word_size);
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory_work(word_size);

  if (!evacuating) {
    // If allocation failed, do a full GC, then retry the allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after full-GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try full-GC again.
    //
    // Lacking the way to detect progress from "collect" call, we are left with blindly
    // retrying for some bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
    int tries = 0;
    while ((result == NULL) && (tries++ < ShenandoahFullGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing full GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
      collect(GCCause::_allocation_failure);
      result = allocate_memory_work(word_size);
    }
  }

  // Only update monitoring counters when not calling from a write-barrier.
  // Otherwise we might attempt to grab the Service_lock, which we must
  // not do when coming from a write-barrier (because the thread might
  // already hold the Compile_lock).
  if (! evacuating) {
    monitoring_support()->update_counters();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size " SIZE_FORMAT " at addr " PTR_FORMAT " by thread %d",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size) {
  assert_heaplock_owned_by_current_thread();

  if (word_size * HeapWordSize > ShenandoahHeapRegion::region_size_bytes()) {
    return allocate_large_memory(word_size);
  }

  // Not enough memory in the free region set. Coming out of a full GC, it is
  // possible that there is no free region available, so current_index may
  // not be valid.
  if (word_size * HeapWordSize > _free_regions->capacity()) return NULL;

  ShenandoahHeapRegion* my_current_region = _free_regions->current_no_humongous();

  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }

#ifdef ASSERT
  if (in_collection_set(my_current_region)) {
    print_heap_regions();
  }
#endif
  assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result = my_current_region->allocate(word_size);

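  // If the current region cannot fit the request, it is effectively retired:
  // account its remaining free space as used in the free set and move on to
  // the next non-humongous free region.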
  while (result == NULL) {
    // 2nd attempt. Try next region.
    _free_regions->increase_used(my_current_region->free());
    ShenandoahHeapRegion* next_region = _free_regions->next_no_humongous();
    assert(next_region != my_current_region, "must not get current again");
    my_current_region = next_region;

    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->allocate(word_size);
  }

  my_current_region->increase_live_data_words(word_size);
  increase_used(word_size * HeapWordSize);
  _free_regions->increase_used(word_size * HeapWordSize);
  return result;
}

HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
  assert_heaplock_owned_by_current_thread();
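
  // A humongous allocation spans ceil(bytes / region_size) whole regions:
  // one humongous-start region followed by humongous-continuation regions,
  // carved contiguously out of the free set.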
  size_t required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
  if (required_regions > _max_regions) return NULL;

  ShenandoahHeapRegion* r = _free_regions->allocate_contiguous(required_regions);

  HeapWord* result = NULL;

  if (r != NULL)  {
    result = r->bottom();

    log_debug(gc, humongous)("allocating humongous object of size: " SIZE_FORMAT " KB at location " PTR_FORMAT " in start region " SIZE_FORMAT,
                             (words * HeapWordSize) / K, p2i(result), r->region_number());
  } else {
    log_debug(gc, humongous)("allocating humongous object of size: " SIZE_FORMAT " KB at location " PTR_FORMAT " failed",
                             (words * HeapWordSize) / K, p2i(result));
  }

  return result;
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
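  // Each object is preceded by its Brooks forwarding pointer, so we allocate
  // BrooksPointer::word_size() extra words and hand out the address just past
  // the forwarding pointer:
  //
  //   filler                    result
  //   v                         v
  //   +-------------------------+----------------------+
  //   | Brooks forwarding ptr.  | object payload ...   |
  //   +-------------------------+----------------------+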
  HeapWord* filler = allocate_memory(size + BrooksPointer::word_size(), false);
  if (filler == NULL) {
    return NULL;
  }

  HeapWord* result = filler + BrooksPointer::word_size();
  BrooksPointer::initialize(oop(result));
  _bytes_allocated_since_cm += size * HeapWordSize;

  assert(! in_collection_set(result), "never allocate in targeted region");
  return result;
}

class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {
  }

  void do_object(oop p) {
    log_develop_trace(gc, compaction)("Calling ParallelEvacuateRegionObjectClosure on " PTR_FORMAT " of size %d",
                                      p2i((HeapWord*) p), p->size());

    assert(_heap->is_marked_complete(p), "expect only marked objects");
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      bool evac;
      _heap->evacuate_object(p, _thread, evac);
    }
  }
};

#ifdef ASSERT
class VerifyEvacuatedObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_complete(p)) {
      oop p_prime = oopDesc::bs()->read_barrier(p);
      assert(! oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
      assert(p->klass() == p_prime->klass(), "Should have the same class p: " PTR_FORMAT ", p_prime: " PTR_FORMAT, p2i(p), p2i(p_prime));
      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "One forward once");
    }
  }
};

void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
  VerifyEvacuatedObjectClosure verify_evacuation;
  marked_object_iterate(from_region, &verify_evacuation);
}
#endif

void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
  assert(from_region->has_live(), "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

  marked_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  if (ShenandoahVerify && ! cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
#endif
}

class ParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* _sh;
  ShenandoahCollectionSet* _cs;

public:
  ParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* from_hr = _cs->claim_next();

    while (from_hr != NULL) {
      log_develop_trace(gc, region)("Thread " INT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
                                    worker_id,
                                    from_hr->region_number());

      assert(from_hr->has_live(), "all-garbage regions are reclaimed early");
      _sh->parallel_evacuate_region(from_hr);

      if (_sh->cancelled_concgc()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, from_hr->region_number());
        break;
      }
      from_hr = _cs->claim_next();
    }
  }
};

class RecycleDirtyRegionsClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
  size_t _bytes_reclaimed;
public:
  RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()), _bytes_reclaimed(0) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert (! _heap->cancelled_concgc(), "no recycling after cancelled marking");

    if (_heap->in_collection_set(r)) {
      log_develop_trace(gc, region)("Recycling region " SIZE_FORMAT ":", r->region_number());
      _heap->decrease_used(r->used());
      _bytes_reclaimed += r->used();
      r->recycle();
    }

    return false;
  }
  size_t bytes_reclaimed() { return _bytes_reclaimed; }
  void clear_bytes_reclaimed() { _bytes_reclaimed = 0; }
};

void ShenandoahHeap::recycle_dirty_regions() {
  RecycleDirtyRegionsClosure cl;
  cl.clear_bytes_reclaimed();

  heap_region_iterate(&cl);

  _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
  if (! cancelled_concgc()) {
    clear_cset_fast_test();
  }
}

ShenandoahFreeSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}

void ShenandoahHeap::print_heap_regions(outputStream* st) const {
  _ordered_regions->print(st);
}

class PrintAllRefsOopClosure: public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT " (marked: %s) (%s " PTR_FORMAT ")",
                      _prefix, _index,
                      p2i(p), p2i(o),
                      BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(o)),
                      o->klass()->internal_name(), p2i(o->klass()));
      } else {
        tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") dirty -> " PTR_FORMAT " (not in heap, possibly corrupted or dirty)",
                      _prefix, _index,
                      p2i(p), p2i(o));
      }
    } else {
      tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class PrintAllRefsObjectClosure : public ObjectClosure {
  const char* _prefix;

public:
  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p)) {
      tty->print_cr("%s object " PTR_FORMAT " (marked: %s) (%s " PTR_FORMAT ") refers to:",
                    _prefix, p2i(p),
                    BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(p)),
                    p->klass()->internal_name(), p2i(p->klass()));
      PrintAllRefsOopClosure cl(_prefix);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::print_all_refs(const char* prefix) {
  tty->print_cr("printing all references in the heap");
  tty->print_cr("root references:");

  ensure_parsability(false);

  PrintAllRefsOopClosure cl(prefix);
  roots_iterate(&cl);

  tty->print_cr("heap references:");
  PrintAllRefsObjectClosure cl2(prefix);
  object_iterate(&cl2);
}

class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (! _heap->is_marked_complete(o)) {
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: " PTR_FORMAT ": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_complete(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr " PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
        tty->print_cr("oop has forwardee: p: " PTR_FORMAT " (%s), o = " PTR_FORMAT " (%s), new-o: " PTR_FORMAT " (%s)",
                      p2i(p),
                      BOOL_TO_STR(_heap->in_collection_set(p)),
                      p2i(o),
                      BOOL_TO_STR(_heap->in_collection_set(o)),
                      p2i((HeapWord*) oopDesc::bs()->read_barrier(o)),
                      BOOL_TO_STR(_heap->in_collection_set(oopDesc::bs()->read_barrier(o))));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
      assert(! _heap->in_collection_set(o), "references must not point to dirty heap regions");
      assert(_heap->is_marked_complete(o), "live oops must be marked current");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::verify_heap_after_marking() {
  verify_heap_size_consistency();

  log_trace(gc)("verifying heap after marking");

  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);
  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}

void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  size_t required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  size_t index = r->region_number();

  assert(!r->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (log_is_enabled(Debug, gc, humongous)) {
      log_debug(gc, humongous)("reclaiming " SIZE_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words",
                               required_regions, size);
      ResourceMark rm;
      outputStream* out = Log(gc, humongous)::debug_stream();
      region->print_on(out);
    }

    region->recycle();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::region_size_bytes());
  }
}

class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! heap->is_marked_complete(humongous_obj)) {
        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

#ifdef ASSERT
class CheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");

  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {
    recycle_dirty_regions();

    ensure_parsability(true);

    if (UseShenandoahMatrix && PrintShenandoahMatrix) {
      outputStream* log = Log(gc)::info_stream();
      connection_matrix()->print_on(log);
    }

    if (ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix)) {
      verify_heap_reachable_at_safepoint();
    }

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }
#endif

    // NOTE: This needs to be done during a stop-the-world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.
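    //
    // Illustrative interleaving, with a == b initially, both in region R:
    //   1. resolve(a) -> a        (R not yet in the collection set)
    //   2. R is added to the collection set; the object is evacuated to a'
    //   3. resolve(b) -> a'       (R now in the collection set)
    //   4. acmp compares a with a' and wrongly reports non-equal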

    {
      ShenandoahHeapLock lock(this);
      _collection_set->clear();
      _free_regions->clear();

      ShenandoahReclaimHumongousRegionsClosure reclaim;
      heap_region_iterate(&reclaim);

#ifdef ASSERT
      CheckCollectionSetClosure ccsc;
      _ordered_regions->heap_region_iterate(&ccsc);
#endif

      _shenandoah_policy->choose_collection_set(_collection_set);

      _shenandoah_policy->choose_free_set(_free_regions);
    }

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}

class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
    RetireTLABClosure cl(retire_tlabs);
    Threads::threads_do(&cl);
  }
}

class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          bool evac;
          resolved = _heap->evacuate_object(obj, _thread, evac);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

class ShenandoahFixRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    SCMUpdateRefsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  if (cancelled_concgc()) {
    // If initial evacuation has been cancelled, we need to update all references
    // after all workers have finished. Otherwise we might run into the following problem:
    // GC thread 1 cannot allocate anymore, thus evacuation fails and leaves a from-space
    // ptr of object X in some root oop*. GC thread 2 then evacuates the same object X to
    // to-space, which leaves a truly dangling from-space reference in the first root oop*.
    // This must not happen.
    // clear() and update_pointers() must always be called in pairs,
    // cannot nest with above clear()/update_pointers().
    COMPILER2_PRESENT(DerivedPointerTable::clear());
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahFixRootsTask update_roots_task(&rp);
    workers()->run_task(&update_roots_task);
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  }

#ifdef ASSERT
  {
    AssertToSpaceClosure cl;
    CodeBlobToOopClosure code_cl(&cl, !CodeBlobToOopClosure::FixRelocations);
    ShenandoahRootEvacuator rp(this, 1);
    rp.process_evacuate_roots(&cl, &code_cl, 0);
  }
#endif
}

void ShenandoahHeap::do_evacuation() {
  parallel_evacuate();

  if (ShenandoahVerify && ! cancelled_concgc()) {
    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
    if (Thread::current()->is_VM_thread()) {
      verify_after_evacuation.doit();
    } else {
      VMThread::execute(&verify_after_evacuation);
    }
  }
}

void ShenandoahHeap::parallel_evacuate() {
  log_develop_trace(gc)("starting parallel_evacuate");

  _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    out->print("Printing all available regions");
    print_heap_regions(out);
  }

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream* out = Log(gc, cset)::trace_stream();
    out->print("Printing collection set which contains " SIZE_FORMAT " regions:\n", _collection_set->count());
    _collection_set->print(out);

    out->print("Printing free set which contains " SIZE_FORMAT " regions:\n", _free_regions->count());
    _free_regions->print(out);
  }

  ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);

  workers()->run_task(&evacuationTask);

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream* out = Log(gc, cset)::trace_stream();
    out->print("Printing postgc collection set which contains " SIZE_FORMAT " regions:\n",
               _collection_set->count());
    _collection_set->print(out);

    out->print("Printing postgc free regions which contain " SIZE_FORMAT " free regions:\n",
               _free_regions->count());
    _free_regions->print(out);
  }

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    out->print_cr("all regions after evacuation:");
    print_heap_regions(out);
  }

  _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);
}

class VerifyEvacuationClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;
  ShenandoahHeapRegion* _from_region;

public:
  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop heap_oop = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(heap_oop)) {
      guarantee(! _from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: " PTR_FORMAT, p2i((HeapWord*) heap_oop));
    }
  }

public:
  void do_oop(oop* p)       {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};
1417 
1418 void ShenandoahHeap::roots_iterate(OopClosure* cl) {
1419 
1420   assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
1421 
1422   CodeBlobToOopClosure blobsCl(cl, false);
1423   CLDToOopClosure cldCl(cl);
1424 
1425   ClassLoaderDataGraph::clear_claimed_marks();
1426 
1427   ShenandoahRootProcessor rp(this, 1);
1428   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
1429 }
1430 
void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
  VerifyEvacuationClosure rootsCl(from_region);
  roots_iterate(&rootsCl);
}
1437 
1438 bool ShenandoahHeap::supports_tlab_allocation() const {
1439   return true;
1440 }
1441 
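// Estimate the largest TLAB we could hand out right now: the remainder of the
// current allocation region if it is still usable, otherwise a whole region.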
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1444   size_t idx = _free_regions->current_index();
1445   ShenandoahHeapRegion* current = _free_regions->get(idx);
1446   if (current == NULL) {
1447     return 0;
1448   } else if (current->free() > MinTLABSize) {
1449     // Current region has enough space left, can use it.
1450     return current->free();
1451   } else {
1452     // No more space in current region, we will take next free region
1453     // on the next TLAB allocation.
1454     return ShenandoahHeapRegion::region_size_bytes();
1455   }
1456 }
1457 
1458 size_t ShenandoahHeap::max_tlab_size() const {
1459   return ShenandoahHeapRegion::region_size_bytes();
1460 }
1461 
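// GCLABs are handled analogously to TLABs: after each cycle, every thread's
// GCLAB is resized based on the allocation statistics gathered for it.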
1462 class ResizeGCLABClosure : public ThreadClosure {
1463 public:
1464   void do_thread(Thread* thread) {
1465     thread->gclab().resize();
1466   }
1467 };
1468 
1469 void ShenandoahHeap::resize_all_tlabs() {
1470   CollectedHeap::resize_all_tlabs();
1471 
1472   ResizeGCLABClosure cl;
1473   Threads::threads_do(&cl);
1474 }
1475 
1476 class AccumulateStatisticsGCLABClosure : public ThreadClosure {
1477 public:
1478   void do_thread(Thread* thread) {
1479     thread->gclab().accumulate_statistics();
1480     thread->gclab().initialize_statistics();
1481   }
1482 };
1483 
1484 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1485   AccumulateStatisticsGCLABClosure cl;
1486   Threads::threads_do(&cl);
1487 }
1488 
bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
1490   return true;
1491 }
1492 
1493 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1494   // Overridden to do nothing.
1495   return new_obj;
1496 }
1497 
bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1499   return true;
1500 }
1501 
1502 bool ShenandoahHeap::card_mark_must_follow_store() const {
1503   return false;
1504 }
1505 
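// Entry point for explicit GC requests. User-requested GCs (System.gc()) and
// allocation failures are both funneled to the concurrent GC thread as full
// GCs; all other causes are ignored here.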
1506 void ShenandoahHeap::collect(GCCause::Cause cause) {
1507   assert(cause != GCCause::_gc_locker, "no JNI critical callback");
1508   if (GCCause::is_user_requested_gc(cause)) {
1509     if (! DisableExplicitGC) {
1510       _concurrent_gc_thread->do_full_gc(cause);
1511     }
1512   } else if (cause == GCCause::_allocation_failure) {
1513     collector_policy()->set_should_clear_all_soft_refs(true);
1514     _concurrent_gc_thread->do_full_gc(cause);
1515   }
1516 }
1517 
void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  // No-op: full collections in Shenandoah are initiated through the
  // concurrent GC thread (see ShenandoahHeap::collect()).
}
1521 
AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;
}
1527 
1528 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1529   return _shenandoah_policy;
1530 }
1533 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1534   Space* sp = heap_region_containing(addr);
1535   if (sp != NULL) {
1536     return sp->block_start(addr);
1537   }
1538   return NULL;
1539 }
1540 
1541 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1542   Space* sp = heap_region_containing(addr);
1543   assert(sp != NULL, "block_size of address outside of heap");
1544   return sp->block_size(addr);
1545 }
1546 
1547 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1548   Space* sp = heap_region_containing(addr);
1549   return sp->block_is_obj(addr);
1550 }
1551 
1552 jlong ShenandoahHeap::millis_since_last_gc() {
1553   return 0;
1554 }
1555 
1556 void ShenandoahHeap::prepare_for_verify() {
1557   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1558     ensure_parsability(false);
1559   }
1560 }
1561 
1562 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1563   workers()->print_worker_threads_on(st);
1564 }
1565 
1566 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1567   workers()->threads_do(tcl);
1568 }
1569 
1570 void ShenandoahHeap::print_tracing_info() const {
1571   if (log_is_enabled(Info, gc, stats)) {
1572     ResourceMark rm;
1573     outputStream* out = Log(gc, stats)::info_stream();
1574     _shenandoah_policy->print_tracing_info(out);
1575   }
1576 }
1577 
1578 class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
1579 private:
1580   ShenandoahHeap*  _heap;
1581   VerifyOption     _vo;
1582   bool             _failures;
1583 public:
1584   // _vo == UsePrevMarking -> use "prev" marking information,
1585   // _vo == UseNextMarking -> use "next" marking information,
1586   // _vo == UseMarkWord    -> use mark word from object header.
1587   ShenandoahVerifyRootsClosure(VerifyOption vo) :
1588     _heap(ShenandoahHeap::heap()),
1589     _vo(vo),
1590     _failures(false) { }
1591 
1592   bool failures() { return _failures; }
1593 
1594 private:
1595   template <class T>
1596   inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
      // Print diagnostics before the guarantee below fails.
      tty->print_cr("Root location "PTR_FORMAT" refers to bad oop "PTR_FORMAT,
                    p2i(p), p2i((void*) obj));
      _failures = true;
    }
    guarantee(obj->is_oop_or_null(), "is oop or null");
  }
1607 
1608 public:
1609   void do_oop(oop* p)       {
1610     do_oop_work(p);
1611   }
1612 
1613   void do_oop(narrowOop* p) {
1614     do_oop_work(p);
1615   }
1616 
1617 };
1618 
1619 class ShenandoahVerifyHeapClosure: public ObjectClosure {
1620 private:
1621   ShenandoahVerifyRootsClosure _rootsCl;
1622 public:
1623   ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
1624     _rootsCl(rc) {};
1625 
1626   void do_object(oop p) {
1627     _rootsCl.do_oop(&p);
1628   }
1629 };
1630 
1631 void ShenandoahHeap::verify(VerifyOption vo) {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    ShenandoahVerifyRootsClosure rootsCl(vo);
1635 
1636     assert(Thread::current()->is_VM_thread(),
1637            "Expected to be executed serially by the VM thread at this point");
1638 
1639     roots_iterate(&rootsCl);
1640 
1641     bool failures = rootsCl.failures();
1642     log_trace(gc)("verify failures: %s", BOOL_TO_STR(failures));
1643 
1644     ShenandoahVerifyHeapClosure heapCl(rootsCl);
1645 
1646     object_iterate(&heapCl);
1647     // TODO: Implement rest of it.
1648   } else {
1649     tty->print("(SKIPPING roots, heapRegions, remset) ");
1650   }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1653   return _free_regions->capacity();
1654 }
1655 
1656 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
1657   ObjectClosure* _cl;
1658 public:
1659   ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1660   bool doHeapRegion(ShenandoahHeapRegion* r) {
1661     ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
1662     return false;
1663   }
1664 };
1665 
1666 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1667   ShenandoahIterateObjectClosureRegionClosure blk(cl);
1668   heap_region_iterate(&blk, false, true);
1669 }
1670 
1671 class ShenandoahSafeObjectIterateAdjustPtrsClosure : public MetadataAwareOopClosure {
1672 private:
1673   ShenandoahHeap* _heap;
1674 
1675 public:
1676   ShenandoahSafeObjectIterateAdjustPtrsClosure() : _heap(ShenandoahHeap::heap()) {}
1677 
1678 private:
1679   template <class T>
1680   inline void do_oop_work(T* p) {
1681     T o = oopDesc::load_heap_oop(p);
1682     if (!oopDesc::is_null(o)) {
1683       oop obj = oopDesc::decode_heap_oop_not_null(o);
1684       oopDesc::encode_store_heap_oop(p, BrooksPointer::forwardee(obj));
1685     }
1686   }
1687 public:
1688   void do_oop(oop* p) {
1689     do_oop_work(p);
1690   }
1691   void do_oop(narrowOop* p) {
1692     do_oop_work(p);
1693   }
1694 };
1695 
1696 class ShenandoahSafeObjectIterateAndUpdate : public ObjectClosure {
1697 private:
1698   ObjectClosure* _cl;
1699 public:
1700   ShenandoahSafeObjectIterateAndUpdate(ObjectClosure *cl) : _cl(cl) {}
1701 
1702   virtual void do_object(oop obj) {
1703     assert (oopDesc::unsafe_equals(obj, BrooksPointer::forwardee(obj)),
1704             "avoid double-counting: only non-forwarded objects here");
1705 
1706     // Fix up the ptrs.
1707     ShenandoahSafeObjectIterateAdjustPtrsClosure adjust_ptrs;
1708     obj->oop_iterate(&adjust_ptrs);
1709 
    // Can report the object to the wrapped closure now:
1711     _cl->do_object(obj);
1712   }
1713 };
1714 
1715 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1716   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1717 
1718   // Safe iteration does objects only with correct references.
1719   // This is why we skip dirty regions that have stale copies of objects,
1720   // and fix up the pointers in the returned objects.
1721 
1722   ShenandoahSafeObjectIterateAndUpdate safe_cl(cl);
1723   ShenandoahIterateObjectClosureRegionClosure blk(&safe_cl);
1724   heap_region_iterate(&blk,
1725                       /* skip_dirty_regions = */ true,
1726                       /* skip_humongous_continuations = */ true);
1727 
1728   _need_update_refs = false; // already updated the references
1729 }
1730 
1731 // Apply blk->doHeapRegion() on all committed regions in address order,
1732 // terminating the iteration early if doHeapRegion() returns true.
1733 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
1734   for (size_t i = 0; i < _num_regions; i++) {
1735     ShenandoahHeapRegion* current  = _ordered_regions->get(i);
1736     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1737       continue;
1738     }
1739     if (skip_dirty_regions && in_collection_set(current)) {
1740       continue;
1741     }
1742     if (blk->doHeapRegion(current)) {
1743       return;
1744     }
1745   }
1746 }
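// A minimal usage sketch (hypothetical closure, not part of this file):
// iterate all committed regions and count them.
//
//   class CountRegionsClosure : public ShenandoahHeapRegionClosure {
//   public:
//     size_t _count;
//     CountRegionsClosure() : _count(0) {}
//     bool doHeapRegion(ShenandoahHeapRegion* r) {
//       _count++;
//       return false; // false means: keep iterating
//     }
//   };
//   CountRegionsClosure cl;
//   heap_region_iterate(&cl);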
1747 
1748 class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
1749   ShenandoahHeap* sh;
1750 public:
1751   ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
1752 
1753   bool doHeapRegion(ShenandoahHeapRegion* r) {
1754     r->clear_live_data();
1755     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1756     return false;
1757   }
1758 };
1759 
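// Prepare for and kick off concurrent marking: accumulate TLAB statistics,
// retire TLABs so the heap is parsable, reset per-region liveness and
// next-TAMS values, then scan the root set to seed the mark queues.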
void ShenandoahHeap::start_concurrent_marking() {
1762   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
1763   accumulate_statistics_all_tlabs();
1764   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);
1765 
1766   set_concurrent_mark_in_progress(true);
1767   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1768   if (UseTLAB) {
1769     shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
1770     ensure_parsability(true);
1771     shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
1772   }
1773 
1774   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1775   _used_start_gc = used();
1776 
1777 #ifdef ASSERT
1778   if (ShenandoahDumpHeapBeforeConcurrentMark) {
1779     ensure_parsability(false);
1780     print_all_refs("pre-mark");
1781   }
1782 #endif
1783 
1784   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
1785   ClearLivenessClosure clc(this);
1786   heap_region_iterate(&clc);
1787   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);
1788 
1789   if (UseShenandoahMatrix) {
1790     connection_matrix()->clear_all();
1791   }
1796   // Make above changes visible to worker threads
1797   OrderAccess::fence();
1798 
1799   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
1800   concurrentMark()->init_mark_roots();
1801   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);
1804 }
1805 
class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
private:
  ShenandoahHeap* _sh;

public:
  VerifyAfterEvacuationClosure() : _sh(ShenandoahHeap::heap()) {}
1812 
1813   template<class T> void do_oop_nv(T* p) {
1814     T heap_oop = oopDesc::load_heap_oop(p);
1815     if (!oopDesc::is_null(heap_oop)) {
1816       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1817       guarantee(_sh->in_collection_set(obj) == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                "forwarded objects can only exist in dirty (from-space) regions; is_dirty: %s, is_forwarded: %s, obj-klass: %s, marked: %s",
1819                 BOOL_TO_STR(_sh->in_collection_set(obj)),
1820                 BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1821                 obj->klass()->external_name(),
1822                 BOOL_TO_STR(_sh->is_marked_complete(obj))
1823                 );
1824       obj = oopDesc::bs()->read_barrier(obj);
1825       guarantee(! _sh->in_collection_set(obj), "forwarded oops must not point to dirty regions");
1826       guarantee(obj->is_oop(), "is_oop");
1827       guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
1828     }
1829   }
1830 
1831   void do_oop(oop* p)       { do_oop_nv(p); }
1832   void do_oop(narrowOop* p) { do_oop_nv(p); }
};

void ShenandoahHeap::verify_heap_after_evacuation() {
  verify_heap_size_consistency();

  ensure_parsability(false);

  VerifyAfterEvacuationClosure cl;
  roots_iterate(&cl);

  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}
1849 
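// Marking has completed: the "next" bitmap and top-at-mark-start values now
// describe the finished marking, so promote them to "complete", and reuse the
// old "complete" set for the next cycle.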
1850 void ShenandoahHeap::swap_mark_bitmaps() {
1851   // Swap bitmaps.
1852   CMBitMap* tmp1 = _complete_mark_bit_map;
1853   _complete_mark_bit_map = _next_mark_bit_map;
1854   _next_mark_bit_map = tmp1;
1855 
1856   // Swap top-at-mark-start pointers
1857   HeapWord** tmp2 = _complete_top_at_mark_starts;
1858   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1859   _next_top_at_mark_starts = tmp2;
1860 
1861   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1862   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1863   _next_top_at_mark_starts_base = tmp3;
1864 }
1865 
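// Re-runs a single-threaded marking traversal over the reachable heap using a
// scratch bitmap and one SCM queue, checking basic invariants (alignment, heap
// containment, no forwarded objects) and, optionally, the connection matrix.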
1866 class VerifyReachableHeapClosure : public ExtendedOopClosure {
1867 private:
1868   SCMObjToScanQueue* _queue;
1869   ShenandoahHeap* _heap;
1870   CMBitMap* _map;
1871   bool _check_matrix;
1872   oop _obj;
1873 public:
1874   VerifyReachableHeapClosure(SCMObjToScanQueue* queue, CMBitMap* map, bool check_matrix) :
1875           _queue(queue), _heap(ShenandoahHeap::heap()), _map(map), _check_matrix(check_matrix) {};
1876   template <class T>
1877   void do_oop_work(T* p) {
1878     T o = oopDesc::load_heap_oop(p);
1879     if (!oopDesc::is_null(o)) {
1880       oop obj = oopDesc::decode_heap_oop_not_null(o);
1881       guarantee(check_obj_alignment(obj), "sanity");
1882 
1883       guarantee(!oopDesc::is_null(obj), "sanity");
1884       guarantee(_heap->is_in(obj), "sanity");
1885 
1886       oop forw = BrooksPointer::forwardee(obj);
1887       guarantee(!oopDesc::is_null(forw), "sanity");
1888       guarantee(_heap->is_in(forw), "sanity");
1889 
1890       guarantee(oopDesc::unsafe_equals(obj, forw), "should not be forwarded");
1891 
1892       if (_check_matrix) {
1893         size_t from_idx = _heap->heap_region_index_containing(p);
1894         size_t to_idx = _heap->heap_region_index_containing(obj);
1895         if (!_heap->connection_matrix()->is_connected(from_idx, to_idx)) {
1896           tty->print_cr("from-obj: ");
1897           _obj->print_on(tty);
1898           tty->print_cr("to-obj:");
1899           obj->print_on(tty);
1900           tty->print_cr("from-obj allocated after mark: %s", BOOL_TO_STR(_heap->allocated_after_complete_mark_start((HeapWord*) _obj)));
1901           tty->print_cr("to-obj allocated after mark: %s", BOOL_TO_STR(_heap->allocated_after_complete_mark_start((HeapWord*) obj)));
1902           tty->print_cr("from-obj marked: %s", BOOL_TO_STR(_heap->is_marked_complete(_obj)));
1903           tty->print_cr("to-obj marked: %s", BOOL_TO_STR(_heap->is_marked_complete(obj)));
1904           tty->print_cr("from-idx: " SIZE_FORMAT ", to-idx: " SIZE_FORMAT, from_idx, to_idx);
1905 
1906           oop fwd_from = BrooksPointer::forwardee(_obj);
1907           oop fwd_to = BrooksPointer::forwardee(obj);
1908           tty->print_cr("from-obj forwardee: " PTR_FORMAT, p2i(fwd_from));
1909           tty->print_cr("to-obj forwardee: " PTR_FORMAT, p2i(fwd_to));
1910           tty->print_cr("forward(from-obj) marked: %s", BOOL_TO_STR(_heap->is_marked_complete(fwd_from)));
1911           tty->print_cr("forward(to-obj) marked: %s", BOOL_TO_STR(_heap->is_marked_complete(fwd_to)));
1912           size_t fwd_from_idx = _heap->heap_region_index_containing(fwd_from);
1913           size_t fwd_to_idx = _heap->heap_region_index_containing(fwd_to);
1914           tty->print_cr("forward(from-idx): " SIZE_FORMAT ", forward(to-idx): " SIZE_FORMAT, fwd_from_idx, fwd_to_idx);
1915           tty->print_cr("forward(from) connected with forward(to)? %s", BOOL_TO_STR(_heap->connection_matrix()->is_connected(fwd_from_idx, fwd_to_idx)));
1916         }
        guarantee(oopDesc::unsafe_equals(ShenandoahBarrierSet::resolve_oop_static_not_null(obj), obj), "must not be forwarded");
1918         guarantee(_heap->connection_matrix()->is_connected(from_idx, to_idx), "must be connected");
1919       }
1920 
1921       if (_map->parMark((HeapWord*) obj)) {
1922         _queue->push(SCMTask(obj));
1923       }
1924     }
1925   }
1926 
1927   void do_oop(oop* p) { do_oop_work(p); }
1928   void do_oop(narrowOop* p) { do_oop_work(p); }
1929   void set_obj(oop o) { _obj = o; }
1930 };
1931 
1932 void ShenandoahHeap::verify_heap_reachable_at_safepoint() {
1933   guarantee(SafepointSynchronize::is_at_safepoint(), "only when nothing else happens");
1934   guarantee(ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix),
1935             "only when these are enabled, and bitmap is initialized in ShenandoahHeap::initialize");
1936 
1937   OrderAccess::fence();
1938   ensure_parsability(false);
1939 
  // Clear the temporary bitmap that will store the marking wavefront:
  MemRegion mr = MemRegion(_verification_bit_map.startWord(), _verification_bit_map.endWord());
  _verification_bit_map.clear_range_large(mr);
1943 
1944   // Initialize a single queue
1945   SCMObjToScanQueue* q = new SCMObjToScanQueue();
1946   q->initialize();
1947 
1948   // Scan root set
1949   ClassLoaderDataGraph::clear_claimed_marks();
1950   ShenandoahRootProcessor rp(this, 1);
1951 
1952   {
1953     VerifyReachableHeapClosure cl(q, &_verification_bit_map, false);
1954     CLDToOopClosure cld_cl(&cl);
1955     CodeBlobToOopClosure code_cl(&cl, ! CodeBlobToOopClosure::FixRelocations);
1956     rp.process_all_roots(&cl, &cl, &cld_cl, &code_cl, 0);
1957   }
1958 
1959   // Finish the scan
1960   {
1961     VerifyReachableHeapClosure cl(q, &_verification_bit_map, UseShenandoahMatrix && VerifyShenandoahMatrix);
1962     SCMTask task;
1963     while ((q->pop_buffer(task) ||
1964             q->pop_local(task) ||
1965             q->pop_overflow(task))) {
1966       oop obj = task.obj();
1967       assert(!oopDesc::is_null(obj), "must not be null");
1968       cl.set_obj(obj);
1969       obj->oop_iterate(&cl);
1970     }
1971   }
1972 
  // Clean up.
  delete q;
1975 }
1976 
1977 void ShenandoahHeap::stop_concurrent_marking() {
1978   assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // Marking finished without cancellation: the marking traversal has already
    // fixed up any from-space references it found, so the pending update-refs
    // request can be dropped, and the freshly completed marking becomes the
    // "complete" one.
    set_need_update_refs(false);
    swap_mark_bitmaps();
  }
1985   set_concurrent_mark_in_progress(false);
1986 
1987   if (log_is_enabled(Trace, gc, region)) {
1988     ResourceMark rm;
1989     outputStream* out = Log(gc, region)::trace_stream();
1990     print_heap_regions(out);
1991   }
}
1994 
1995 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1996   _concurrent_mark_in_progress = in_progress ? 1 : 0;
1997   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1998 }
1999 
2000 void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
2001   // Note: it is important to first release the _evacuation_in_progress flag here,
2002   // so that Java threads can get out of oom_during_evacuation() and reach a safepoint,
2003   // in case a VM task is pending.
2004   set_evacuation_in_progress(in_progress);
2005   MutexLocker mu(Threads_lock);
2006   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
2007 }
2008 
2009 void ShenandoahHeap::set_evacuation_in_progress_at_safepoint(bool in_progress) {
2010   assert(SafepointSynchronize::is_at_safepoint(), "Only call this at safepoint");
2011   set_evacuation_in_progress(in_progress);
2012   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
2013 }
2014 
2015 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2016   _evacuation_in_progress = in_progress ? 1 : 0;
2017   OrderAccess::fence();
2018 }
2019 
void ShenandoahHeap::verify_copy(oop p, oop c) {
  assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
  assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
  if (p->klass() != c->klass()) {
    print_heap_regions();
  }
  assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
  assert(p->size() == c->size(), "verify size");
  // Object may have been locked between copy and verification
  //    assert(p->mark() == c->mark(), "verify mark");
  assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
}
2032 
2033 void ShenandoahHeap::oom_during_evacuation() {
2034   log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
2035                         Thread::current()->osthread()->thread_id());
2036 
2037   // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
2038   collector_policy()->set_should_clear_all_soft_refs(true);
2039   concurrent_thread()->try_set_full_gc();
2040   cancel_concgc(_oom_evacuation);
2041 
2042   if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
2043     assert(! Threads_lock->owned_by_self()
2044            || SafepointSynchronize::is_at_safepoint(), "must not hold Threads_lock here");
2045     log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation finishes.");
2046     while (_evacuation_in_progress) { // wait.
2047       Thread::current()->_ParkEvent->park(1);
2048     }
2049   }
}
2052 
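// Newly allocated TLAB memory starts with space for a Brooks pointer: the
// object itself begins one forwarding-pointer word past the raw allocation.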
2053 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
2054   // Initialize Brooks pointer for the next object
2055   HeapWord* result = obj + BrooksPointer::word_size();
2056   BrooksPointer::initialize(oop(result));
2057   return result;
2058 }
2059 
2060 uint ShenandoahHeap::oop_extra_words() {
2061   return BrooksPointer::word_size();
2062 }
2063 
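// Expand the heap by the given number of regions: commit the backing storage,
// then construct and register each new region and add it to the free set.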
2064 void ShenandoahHeap::grow_heap_by(size_t num_regions) {
2065   size_t base = _num_regions;
2066   ensure_new_regions(num_regions);
2067   for (size_t i = 0; i < num_regions; i++) {
2068     size_t new_region_index = i + base;
2069     HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::region_size_bytes() / HeapWordSize) * new_region_index;
2070     ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion(this, start, ShenandoahHeapRegion::region_size_bytes() / HeapWordSize, new_region_index);
2071 
2072     if (log_is_enabled(Trace, gc, region)) {
2073       ResourceMark rm;
2074       outputStream* out = Log(gc, region)::trace_stream();
2075       out->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
2076       new_region->print_on(out);
2077     }
2078 
2079     assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
2080     _ordered_regions->add_region(new_region);
2081     _in_cset_fast_test_base[new_region_index] = false; // Not in cset
2082     _next_top_at_mark_starts_base[new_region_index] = new_region->bottom();
2083     _complete_top_at_mark_starts_base[new_region_index] = new_region->bottom();
2084 
2085     _free_regions->add_region(new_region);
2086   }
2087 }
2088 
void ShenandoahHeap::ensure_new_regions(size_t new_regions) {
  size_t num_regions = _num_regions;
  size_t new_num_regions = num_regions + new_regions;
  assert(new_num_regions <= _max_regions, "we checked this earlier");

  size_t expand_size = new_regions * ShenandoahHeapRegion::region_size_bytes();
  log_trace(gc, region)("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
  bool success = _storage.expand_by(expand_size, ShenandoahAlwaysPreTouch);
  assert(success, "should always be able to expand by requested size");

  _num_regions = new_num_regions;
}
2103 
2104 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
2105   _heap(ShenandoahHeap::heap_no_check()) {
2106 }
2107 
2108 void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
2109   _heap = heap;
2110 }
2111 
bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
2115   obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
2116 #ifdef ASSERT
2117   if (_heap->concurrent_mark_in_progress()) {
2118     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
2119   }
2120 #endif
2121   assert(!oopDesc::is_null(obj), "null");
2122   return _heap->is_marked_next(obj);
2123 }
2124 
2125 void ShenandoahHeap::ref_processing_init() {
2126   MemRegion mr = reserved_region();
2127 
2128   isAlive.init(ShenandoahHeap::heap());
2129   assert(_max_workers > 0, "Sanity");
2130 
  _ref_processor =
    new ReferenceProcessor(mr,                      // span
                           ParallelRefProcEnabled,  // MT processing
                           _max_workers,            // degree of MT processing
                           true,                    // MT discovery
                           _max_workers,            // degree of MT discovery
                           false,                   // reference discovery is not atomic
                           &isAlive);               // is-alive closure
2144 }
2145 
2146 size_t ShenandoahHeap::num_regions() {
2147   return _num_regions;
2148 }
2149 
2150 size_t ShenandoahHeap::max_regions() {
2151   return _max_regions;
2152 }
2153 
2154 GCTracer* ShenandoahHeap::tracer() {
2155   return shenandoahPolicy()->tracer();
2156 }
2157 
2158 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2159   return _free_regions->used();
2160 }
2161 
2162 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
2163   if (try_cancel_concgc()) {
2164     log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
2165     _shenandoah_policy->report_concgc_cancelled();
2166   }
2167 }
2168 
2169 void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
2170   if (try_cancel_concgc()) {
2171     log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
2172     _shenandoah_policy->report_concgc_cancelled();
2173   }
2174 }
2175 
2176 const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
2177   switch (cause) {
2178     case _oom_evacuation:
2179       return "Out of memory for evacuation";
2180     case _vm_stop:
2181       return "Stopping VM";
2182     default:
2183       return "Unknown";
2184   }
2185 }
2186 
2187 uint ShenandoahHeap::max_workers() {
2188   return _max_workers;
2189 }
2190 
2191 void ShenandoahHeap::stop() {
2192   // The shutdown sequence should be able to terminate when GC is running.
2193 
2194   // Step 1. Notify control thread that we are in shutdown.
2195   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2196   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2197   _concurrent_gc_thread->prepare_for_graceful_shutdown();
2198 
2199   // Step 2. Notify GC workers that we are cancelling GC.
2200   cancel_concgc(_vm_stop);
2201 
2202   // Step 3. Wait until GC worker exits normally.
2203   _concurrent_gc_thread->stop();
2204 }
2205 
2206 void ShenandoahHeap::unload_classes_and_cleanup_tables() {
2207   ShenandoahForwardedIsAliveClosure is_alive;
2208   // Unload classes and purge SystemDictionary.
2209   bool purged_class = SystemDictionary::do_unloading(&is_alive, true);
2210   ParallelCleaningTask unlink_task(&is_alive, true, true, _workers->active_workers(), purged_class);
2211   _workers->run_task(&unlink_task);
2212   ClassLoaderDataGraph::purge();
2213 }
2214 
2215 void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
2216   _need_update_refs = need_update_refs;
2217 }
2218 
// FIXME: This should live in ShenandoahHeapRegionSet.
2220 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2221   size_t region_idx = r->region_number() + 1;
2222   ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
2223   guarantee(next->region_number() == region_idx, "region number must match");
2224   while (next->is_humongous()) {
2225     region_idx = next->region_number() + 1;
2226     next = _ordered_regions->get(region_idx);
2227     guarantee(next->region_number() == region_idx, "region number must match");
2228   }
2229   return next;
2230 }
2231 
2232 void ShenandoahHeap::set_region_in_collection_set(size_t region_index, bool b) {
2233   _in_cset_fast_test_base[region_index] = b;
2234 }
2235 
2236 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2237   return _monitoring_support;
2238 }
2239 
2240 CMBitMap* ShenandoahHeap::complete_mark_bit_map() {
2241   return _complete_mark_bit_map;
2242 }
2243 
2244 CMBitMap* ShenandoahHeap::next_mark_bit_map() {
2245   return _next_mark_bit_map;
2246 }
2247 
2248 void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
2249   _free_regions->add_region(r);
2250 }
2251 
2252 void ShenandoahHeap::clear_free_regions() {
2253   _free_regions->clear();
2254 }
2255 
2256 address ShenandoahHeap::in_cset_fast_test_addr() {
2257   return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
2258 }
2259 
2260 address ShenandoahHeap::cancelled_concgc_addr() {
2261   return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
2262 }
2263 
2264 void ShenandoahHeap::clear_cset_fast_test() {
2265   assert(_in_cset_fast_test_base != NULL, "sanity");
2266   memset(_in_cset_fast_test_base, false,
2267          _in_cset_fast_test_length * sizeof(bool));
2268 }
2269 
2270 size_t ShenandoahHeap::conservative_max_heap_alignment() {
2271   return ShenandoahMaxRegionSize;
2272 }
2273 
2274 size_t ShenandoahHeap::bytes_allocated_since_cm() {
2275   return _bytes_allocated_since_cm;
2276 }
2277 
2278 void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
2279   _bytes_allocated_since_cm = bytes;
2280 }
2281 
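// Top-at-mark-start (TAMS) bookkeeping. Objects allocated in a region above
// its TAMS after marking started are treated as implicitly live and need no
// mark bitmap entry. The region index is derived from the region base address
// via the region size shift.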
2282 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2283   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2284   _next_top_at_mark_starts[index] = addr;
2285 }
2286 
2287 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2288   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2289   return _next_top_at_mark_starts[index];
2290 }
2291 
2292 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2293   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2294   _complete_top_at_mark_starts[index] = addr;
2295 }
2296 
2297 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2298   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2299   return _complete_top_at_mark_starts[index];
2300 }
2301 
2302 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2303   _full_gc_in_progress = in_progress;
2304 }
2305 
2306 bool ShenandoahHeap::is_full_gc_in_progress() const {
2307   return _full_gc_in_progress;
2308 }
2309 
2310 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2311   _update_refs_in_progress = in_progress;
2312 }
2313 
2314 bool ShenandoahHeap::is_update_refs_in_progress() const {
2315   return _update_refs_in_progress;
2316 }
2317 
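// Updates the oops embedded in an nmethod through the write barrier, so that
// freshly registered code only ever refers to to-space copies.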
2318 class NMethodOopInitializer : public OopClosure {
2319 private:
2320   ShenandoahHeap* _heap;
2321 public:
2322   NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
2323   }
2324 
2325 private:
2326   template <class T>
2327   inline void do_oop_work(T* p) {
2328     T o = oopDesc::load_heap_oop(p);
2329     if (! oopDesc::is_null(o)) {
2330       oop obj1 = oopDesc::decode_heap_oop_not_null(o);
2331       oop obj2 = oopDesc::bs()->write_barrier(obj1);
2332       if (! oopDesc::unsafe_equals(obj1, obj2)) {
2333         oopDesc::encode_store_heap_oop(p, obj2);
2334       }
2335     }
2336   }
2337 
2338 public:
2339   void do_oop(oop* o) {
2340     do_oop_work(o);
2341   }
2342   void do_oop(narrowOop* o) {
2343     do_oop_work(o);
2344   }
2345 };
2346 
2347 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2348   NMethodOopInitializer init;
2349   nm->oops_do(&init);
2350   nm->fix_oop_relocations();
2351 }
2352 
2353 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
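  // No-op: oops embedded in compiled code are fixed up eagerly in
  // register_nmethod() and visited during code root scanning, so there is no
  // per-nmethod state to tear down here.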
2354 }
2355 
2356 void ShenandoahHeap::pin_object(oop o) {
2357   heap_region_containing(o)->pin();
2358 }
2359 
2360 void ShenandoahHeap::unpin_object(oop o) {
2361   heap_region_containing(o)->unpin();
2362 }
2365 GCTimer* ShenandoahHeap::gc_timer() const {
2366   return _gc_timer;
2367 }
2368 
2369 class ShenandoahCountGarbageClosure : public ShenandoahHeapRegionClosure {
2370 private:
2371   size_t _garbage;
2372 public:
2373   ShenandoahCountGarbageClosure() : _garbage(0) {
2374   }
2375 
2376   bool doHeapRegion(ShenandoahHeapRegion* r) {
2377     if (! r->is_humongous() && ! r->is_pinned() && ! r->in_collection_set()) {
2378       _garbage += r->garbage();
2379     }
2380     return false;
2381   }
2382 
2383   size_t garbage() {
2384     return _garbage;
2385   }
2386 };
2387 
2388 size_t ShenandoahHeap::garbage() {
2389   ShenandoahCountGarbageClosure cl;
2390   heap_region_iterate(&cl);
2391   return cl.garbage();
2392 }
2393 
2394 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() {
2395   return _connection_matrix;
2396 }
2397 
2398 ShenandoahPartialGC* ShenandoahHeap::partial_gc() {
2399   return _partial_gc;
2400 }
2401 
2402 void ShenandoahHeap::do_partial_collection() {
2403   partial_gc()->do_partial_collection();
2404 }
2405 
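// Parallel task for the concurrent update-references phase. Workers claim
// regions one at a time: live objects in regions outside the collection set
// get their references updated via the closure T, while collection-set
// regions only have their stale mark bitmap range cleared.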
2406 template<class T>
2407 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2408 private:
2409   T cl;
2410   ShenandoahHeap* _heap;
2411   ShenandoahHeapRegionSet* _regions;
2412 
2413 public:
2414   ShenandoahUpdateHeapRefsTask() :
2415     AbstractGangTask("Concurrent Update References Task"),
2416     cl(T()),
2417     _heap(ShenandoahHeap::heap()),
2418     _regions(ShenandoahHeap::heap()->regions()) {
2419     _regions->clear_current_index();
2420   }
2421 
2422   void work(uint worker_id) {
2423     ShenandoahHeapRegion* r = _regions->claim_next();
2424     while (r != NULL && ! _heap->cancelled_concgc()) {
2425       if (! _heap->in_collection_set(r) &&
2426           ! r->is_empty()) {
2427         _heap->marked_object_oop_safe_iterate(r, &cl);
2428       } else if (_heap->in_collection_set(r)) {
2429         HeapWord* bottom = r->bottom();
2430         HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
2431         if (top > bottom) {
2432           _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
2433         }
2434       }
2435       r = _regions->claim_next();
2436     }
2437   }
2438 };
2439 
2440 void ShenandoahHeap::concurrent_update_heap_references() {
2441   _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_update_refs);
2442   if (UseShenandoahMatrix) {
2443     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task;
2444     workers()->run_task(&task);
2445   } else {
2446     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task;
2447     workers()->run_task(&task);
2448   }
2449   _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_update_refs);
2450 }
2451 
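// Safepoint prologue for the update-references phase: turn off evacuation,
// and record per-region iteration limits so concurrent updates only walk
// objects that existed when the phase started.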
2452 void ShenandoahHeap::prepare_update_refs() {
2453   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2454   set_evacuation_in_progress_at_safepoint(false);
2455   set_update_refs_in_progress(true);
2456   ensure_parsability(true);
  // The matrix is only maintained when UseShenandoahMatrix is on
  // (see start_concurrent_marking()).
  if (UseShenandoahMatrix) {
    connection_matrix()->clear_all();
  }
  for (size_t i = 0; i < _num_regions; i++) {
    ShenandoahHeapRegion* r = _ordered_regions->get(i);
    r->set_concurrent_iteration_safe_limit(r->top());
  }
2462 }
2463 
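// Safepoint epilogue for the update-references phase: fix up roots, recycle
// the collection set, and rebuild the free set from all non-humongous regions.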
2464 void ShenandoahHeap::finish_update_refs() {
2465   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2466 
2467   if (! cancelled_concgc()) {
2468     concurrentMark()->update_roots();
2469     recycle_dirty_regions();
2470     set_need_update_refs(false);
2471 
2472     if (ShenandoahVerify) {
2473       verify_update_refs();
2474     }
2475 
2476     {
2477       // Rebuild the free set
2478       ShenandoahHeapLock hl(this);
2479       _free_regions->clear();
2480       size_t end = _ordered_regions->active_regions();
2481       for (size_t i = 0; i < end; i++) {
2482         ShenandoahHeapRegion* r = _ordered_regions->get(i);
2483         if (!r->is_humongous()) {
2484           assert (!in_collection_set(r), "collection set should be clear");
2485           _free_regions->add_region(r);
2486         }
2487       }
2488     }
2489   }
2490   set_update_refs_in_progress(false);
2491 }
2492 
2493 class ShenandoahVerifyUpdateRefsClosure : public ExtendedOopClosure {
2494 private:
2495   template <class T>
2496   void do_oop_work(T* p) {
2497     T o = oopDesc::load_heap_oop(p);
2498     if (! oopDesc::is_null(o)) {
2499       oop obj = oopDesc::decode_heap_oop_not_null(o);
2500       guarantee(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
2501                 "must not be forwarded");
2502     }
2503   }
2504 public:
2505   void do_oop(oop* p) { do_oop_work(p); }
2506   void do_oop(narrowOop* p) { do_oop_work(p); }
2507 };
2508 
void ShenandoahHeap::verify_update_refs() {
  ensure_parsability(false);

  ShenandoahVerifyUpdateRefsClosure cl;
2514 
2515   // Verify roots.
2516   {
2517     CodeBlobToOopClosure blobsCl(&cl, false);
2518     CLDToOopClosure cldCl(&cl);
2519     ClassLoaderDataGraph::clear_claimed_marks();
2520     ShenandoahRootProcessor rp(this, 1);
2521     rp.process_all_roots(&cl, &cl, &cldCl, &blobsCl, 0);
2522   }
2523 
2524   // Verify heap.
  for (size_t i = 0; i < num_regions(); i++) {
2526     ShenandoahHeapRegion* r = regions()->get(i);
2527     marked_object_oop_iterate(r, &cl);
2528   }
2529 }
2530 
2531 #ifdef ASSERT
2532 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2533   assert(_heap_lock == locked, "must be locked");
2534   assert(_heap_lock_owner == Thread::current(), "must be owned by current thread");
2535 }
2536 
2537 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2538   Thread* thr = Thread::current();
  assert((_heap_lock == locked && _heap_lock_owner == thr) ||
         (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()),
         "must own the heap lock, or be the VM thread at a safepoint");
2542 }
2543 
2544 #endif