/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

SCMUpdateRefsClosure::SCMUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}

#ifdef ASSERT
template <class T>
void AssertToSpaceClosure::do_oop_nv(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
           "need to-space object here obj: "PTR_FORMAT" , rb(obj): "PTR_FORMAT", p: "PTR_FORMAT,
           p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
  }
}

void AssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void AssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
#endif

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  HeapWord* cur = NULL;
  for (cur = start; cur < end; cur++) {
    tty->print_cr(PTR_FORMAT" : "PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  }
}

class PrintHeapRegionsClosure : public ShenandoahHeapRegionClosure {
private:
  outputStream* _st;
public:
  PrintHeapRegionsClosure() : _st(tty) {}
  PrintHeapRegionsClosure(outputStream* st) : _st(st) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->print_on(_st);
    return false;
  }
};

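// Pre-touch task: touches the committed heap pages of each region and the
// matching slices of both mark bitmaps from the same worker thread, so that
// first-touch NUMA placement keeps a region and its bitmap data together.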
class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;
  const size_t _bitmap_size;
  const size_t _page_size;
  char* _bitmap0_base;
  char* _bitmap1_base;
public:
  ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
                         char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _regions(regions),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap0_base(bitmap0_base),
    _bitmap1_base(bitmap1_base) {
    _regions->clear_current_index();
  }

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(r->bottom()), p2i(r->end()));
      os::pretouch_memory(r->bottom(), r->end(), _page_size);

      size_t start = r->region_number()       * ShenandoahHeapRegion::RegionSizeBytes / CMBitMap::heap_map_factor();
      size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::RegionSizeBytes / CMBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
      os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
      os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);

      r = _regions->claim_next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 Arguments::conservative_max_heap_alignment());
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);

  _num_regions = init_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _max_regions = max_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _initialSize = _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
  size_t regionSizeWords = ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahCollectionSet(_max_regions);
  _free_regions = new ShenandoahFreeSet(_max_regions);

  // Initialize fast collection set test structure.
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
                   NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length, mtGC);
  _in_cset_fast_test = _in_cset_fast_test_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  _next_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  _complete_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  size_t i = 0;
  for (i = 0; i < _num_regions; i++) {
    _in_cset_fast_test_base[i] = false; // Not in cset
    HeapWord* bottom = (HeapWord*) pgc_rs.base() + regionSizeWords * i;
    _complete_top_at_mark_starts_base[i] = bottom;
    _next_top_at_mark_starts_base[i] = bottom;
  }

  {
    ShenandoahHeapLock lock(this);
    for (i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* current = new ShenandoahHeapRegion(this, (HeapWord*) pgc_rs.base() +
                                                               regionSizeWords * i, regionSizeWords, i);
      _free_regions->add_region(current);
      _ordered_regions->add_region(current);
    }
  }
  assert(((size_t) _ordered_regions->active_regions()) == _num_regions, "");
  _first_region = _ordered_regions->get(0);
  _first_region_bottom = _first_region->bottom();
  assert((((size_t) _first_region_bottom) &
          (ShenandoahHeapRegion::RegionSizeBytes - 1)) == 0,
         "misaligned heap: "PTR_FORMAT, p2i(_first_region_bottom));

  _numAllocs = 0;

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    log_trace(gc, region)("All Regions");
    _ordered_regions->print(out);
    log_trace(gc, region)("Free Regions");
    _free_regions->print(out);
  }

  // The call below uses the SATB queue machinery that currently lives in G1,
  // but probably belongs in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /*G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for prev and next bitmap.
  _bitmap_size = CMBitMap::compute_size(heap_rs.size());
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap0(_bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  ReservedSpace bitmap1(_bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _ordered_regions->count(), page_size);
    ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  _mark_bit_map0.initialize(_heap_region, bitmap_region0);
  _complete_mark_bit_map = &_mark_bit_map0;

  _mark_bit_map1.initialize(_heap_region, bitmap_region1);
  _next_mark_bit_map = &_mark_bit_map1;

  _connection_matrix = new ShenandoahConnectionMatrix(_max_regions);
  _partial_gc = new ShenandoahPartialGC(this, _max_regions);

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  return JNI_OK;
}

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _max_allocated_gc(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _connection_matrix(NULL),
  _cancelled_concgc(false),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _heap_lock(0),
#ifdef ASSERT
  _heap_lock_owner(NULL),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
{
  log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }
}

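// Clears the next mark bitmap for every region, but only up to the region's
// top-at-mark-start: words above it were never marked in this bitmap.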
class ResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->next_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
  ResetNextBitmapTask task = ResetNextBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

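// Same as above, but for the complete mark bitmap and its top-at-mark-start.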
class ResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
  ResetCompleteBitmapTask task = ResetCompleteBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

bool ShenandoahHeap::is_next_bitmap_clear() {
  HeapWord* start = _ordered_regions->bottom();
  HeapWord* end = _ordered_regions->end();
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity() / K, used() / K);
  st->print(" [" PTR_FORMAT ", " PTR_FORMAT ") ",
            p2i(reserved_region().start()),
            p2i(reserved_region().end()));
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::RegionSizeBytes / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (cancelled_concgc()) {
    st->print("cancelled ");
  }
  st->print("\n");

  // Adapted from VirtualSpace::print_on(), which is non-PRODUCT only
  st->print("Virtual space:");
  if (_storage.special()) st->print(" (pinned in memory)");
  st->cr();
  st->print_cr(" - committed: " SIZE_FORMAT, _storage.committed_size());
  st->print_cr(" - reserved:  " SIZE_FORMAT, _storage.reserved_size());
  st->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low()), p2i(_storage.high()));
  st->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_storage.low_boundary()), p2i(_storage.high_boundary()));

  if (Verbose) {
    print_heap_regions(st);
  }
}

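// Initializes a thread's GCLAB; applied to Java and GC threads from
// post_initialize() below.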
class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    // This is a very tricky point in VM lifetime. We cannot easily call Threads::threads_do
    // here, because some system threads (VMThread, WatcherThread, etc) are not yet available.
    // Their initialization should be handled separately. If we miss some threads here,
    // any other TLAB-related activity would fail with asserts.

    InitGCLABClosure init_gclabs;
    {
      MutexLocker ml(Threads_lock);
      for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
        init_gclabs.do_thread(thread);
      }
    }
    gc_threads_do(&init_gclabs);

    // The gclab cannot be initialized this early during VM startup, because it cannot
    // determine its max_size yet. Instead, we let the WorkGang initialize the gclab
    // when a new worker is created.
    _workers->set_initialize_gclab();
  }

  _scm->initialize(_max_workers);

  ref_processing_init();
}

class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
  size_t sum;
public:
  CalculateUsedRegionClosure() {
    sum = 0;
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    sum = sum + r->used();
    return false;
  }

  size_t getResult() { return sum; }
};

size_t ShenandoahHeap::calculateUsed() {
  CalculateUsedRegionClosure cl;
  heap_region_iterate(&cl);
  return cl.getResult();
}

void ShenandoahHeap::verify_heap_size_consistency() {
  assert(calculateUsed() == used(),
         "heap used size must be consistent heap-used: "SIZE_FORMAT" regions-used: "SIZE_FORMAT, used(), calculateUsed());
}

size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used += bytes;
}

void ShenandoahHeap::set_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used = bytes;
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  assert(_used >= bytes, "never decrease heap size by more than we've left");
  _used -= bytes;
}

size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

size_t ShenandoahHeap::min_capacity() const {
  return _initialSize;
}

VirtualSpace* ShenandoahHeap::storage() const {
  return (VirtualSpace*) &_storage;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* first_region_bottom = _first_region->bottom();
  HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * _num_regions;
  return p >= first_region_bottom && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

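// Slow path for GCLAB allocation: either keep the current GCLAB (when its free
// space is still worth keeping) and signal the caller to allocate in the shared
// space, or retire it and fill a new one from the heap.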
HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
  return allocate_new_tlab(word_size, false);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
  return allocate_new_tlab(word_size, true);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory(word_size, evacuating);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in dirty region");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size "SIZE_FORMAT" at addr "PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

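// Allocates under the heap lock. While the allocation fails and enough
// uncommitted regions remain, grows the heap by as many regions as the
// request needs and retries.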
HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
  ShenandoahHeapLock heap_lock(this);

  HeapWord* result = allocate_memory_under_lock(word_size);
  size_t grow_by = (word_size * HeapWordSize + ShenandoahHeapRegion::RegionSizeBytes - 1) / ShenandoahHeapRegion::RegionSizeBytes;

  while (result == NULL && _num_regions + grow_by <= _max_regions) {
    grow_heap_by(grow_by);
    result = allocate_memory_under_lock(word_size);
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, bool evacuating) {
  HeapWord* result = NULL;
  result = allocate_memory_work(word_size);

  if (!evacuating) {
    // Allocation failed, try full-GC, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after full-GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try full-GC again.
    //
    // Lacking a way to detect progress from the "collect" call, we are left with
    // blindly retrying for some bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
    int tries = 0;
    while ((result == NULL) && (tries++ < ShenandoahFullGCTries)) {
      log_debug(gc)("[" PTR_FORMAT " Failed to allocate " SIZE_FORMAT " bytes, doing full GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
      collect(GCCause::_allocation_failure);
      result = allocate_memory_work(word_size);
    }
  }

  // Only update monitoring counters when not calling from a write-barrier.
  // Otherwise we might attempt to grab the Service_lock, which we must
  // not do when coming from a write-barrier (because the thread might
  // already hold the Compile_lock).
  if (! evacuating) {
    monitoring_support()->update_counters();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size "SIZE_FORMAT" at addr "PTR_FORMAT " by thread %d ",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}

bool ShenandoahHeap::call_from_write_barrier(bool evacuating) {
  return evacuating && Thread::current()->is_Java_thread();
}

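// Satisfies a small (intra-region) allocation from the current free region,
// advancing to the next free region when the current one runs out. Humongous
// requests take the large-memory path instead.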
HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size) {
  assert_heaplock_owned_by_current_thread();

  if (word_size * HeapWordSize > ShenandoahHeapRegion::RegionSizeBytes) {
    return allocate_large_memory(word_size);
  }

  // Not enough memory in the free region set.
  // Coming out of a full GC, it is possible that there is no
  // free region available, so current_index may not be valid.
  if (word_size * HeapWordSize > _free_regions->capacity()) return NULL;

  ShenandoahHeapRegion* my_current_region = _free_regions->current_no_humongous();

  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }
  assert(my_current_region != NULL, "should have a region at this point");

#ifdef ASSERT
  if (in_collection_set(my_current_region)) {
    print_heap_regions();
  }
#endif
  assert(! in_collection_set(my_current_region), "never get targetted regions in free-lists");
  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result = my_current_region->allocate(word_size);

  while (result == NULL) {
    // 2nd attempt. Try next region.
    _free_regions->increase_used(my_current_region->free());
    ShenandoahHeapRegion* next_region = _free_regions->next_no_humongous();
    assert(next_region != my_current_region, "must not get current again");
    my_current_region = next_region;

    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    assert(my_current_region != NULL, "should have a region at this point");
    assert(! in_collection_set(my_current_region), "never get targetted regions in free-lists");
    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->allocate(word_size);
  }

  my_current_region->increase_live_data_words(word_size);
  increase_used(word_size * HeapWordSize);
  _free_regions->increase_used(word_size * HeapWordSize);
  return result;
}


HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
  assert_heaplock_owned_by_current_thread();

  uint required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
  if (required_regions > _max_regions) return NULL;

  ShenandoahHeapRegion* r = _free_regions->allocate_contiguous(required_regions);

  HeapWord* result = NULL;

  if (r != NULL) {
    result = r->bottom();

    log_debug(gc, humongous)("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" in start region "SIZE_FORMAT,
                             (words * HeapWordSize) / K, p2i(result), r->region_number());
  } else {
    log_debug(gc, humongous)("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" failed",
                             (words * HeapWordSize) / K, p2i(result));
  }

  return result;
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
#ifdef ASSERT
  if (ShenandoahVerify && _numAllocs > 1000000) {
    _numAllocs = 0;
  }
  _numAllocs++;
#endif
  HeapWord* filler = allocate_memory(BrooksPointer::word_size() + size, false);
  if (filler != NULL) {
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(! in_collection_set(result), "never allocate in targetted region");
    return result;
  } else {
    /*
    tty->print_cr("Out of memory. Requested number of words: "SIZE_FORMAT" used heap: "INT64_FORMAT", bytes allocated since last CM: "INT64_FORMAT,
                  size, used(), _bytes_allocated_since_cm);
    {
      print_heap_regions();
      tty->print("Printing "SIZE_FORMAT" free regions:\n", _free_regions->count());
      _free_regions->print();
    }
    */
    return NULL;
  }
}

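// Evacuates every live (marked) object in a region. An object whose Brooks
// pointer still refers to itself has not been copied yet; already-forwarded
// objects are skipped.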
class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {
  }

  void do_object(oop p) {
    log_develop_trace(gc, compaction)("Calling ParallelEvacuateRegionObjectClosure on "PTR_FORMAT" of size %d\n", p2i((HeapWord*) p), p->size());

    assert(_heap->is_marked_complete(p), "expect only marked objects");
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

#ifdef ASSERT
class VerifyEvacuatedObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_complete(p)) {
      oop p_prime = oopDesc::bs()->read_barrier(p);
      assert(! oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
      assert(p->klass() == p_prime->klass(), "Should have the same class p: "PTR_FORMAT", p_prime: "PTR_FORMAT, p2i(p), p2i(p_prime));
      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "One forward once");
    }
  }
};

void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
  VerifyEvacuatedObjectClosure verify_evacuation;
  marked_object_iterate(from_region, &verify_evacuation);
}
#endif

void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
  assert(from_region->has_live(), "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

  marked_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  if (ShenandoahVerify && ! cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
#endif
}

class ParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* _sh;
  ShenandoahCollectionSet* _cs;

public:
  ParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* from_hr = _cs->claim_next();

    while (from_hr != NULL) {
      log_develop_trace(gc, region)("Thread "INT32_FORMAT" claimed Heap Region "SIZE_FORMAT,
                                    worker_id,
                                    from_hr->region_number());

      assert(from_hr->has_live(), "all-garbage regions are reclaimed early");
      _sh->parallel_evacuate_region(from_hr);

      if (_sh->cancelled_concgc()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT "\n", from_hr->region_number());
        break;
      }
      from_hr = _cs->claim_next();
    }
  }
};

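// Recycles all collection-set ("dirty") regions and accounts the bytes they
// return to the free pool.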
class RecycleDirtyRegionsClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
  size_t _bytes_reclaimed;
public:
  RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()), _bytes_reclaimed(0) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert (! _heap->cancelled_concgc(), "no recycling after cancelled marking");

    if (_heap->in_collection_set(r)) {
      log_develop_trace(gc, region)("Recycling region " SIZE_FORMAT ":", r->region_number());
      _heap->decrease_used(r->used());
      _bytes_reclaimed += r->used();
      r->recycle();
    }

    return false;
  }

  size_t bytes_reclaimed() { return _bytes_reclaimed; }
  void clear_bytes_reclaimed() { _bytes_reclaimed = 0; }
};

void ShenandoahHeap::recycle_dirty_regions() {
  RecycleDirtyRegionsClosure cl;
  cl.clear_bytes_reclaimed();

  heap_region_iterate(&cl);

  _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
  if (! cancelled_concgc()) {
    clear_cset_fast_test();
  }
}

ShenandoahFreeSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}

void ShenandoahHeap::print_heap_regions(outputStream* st) const {
  _ordered_regions->print(st);
}

class PrintAllRefsOopClosure: public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT")-> "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT")",
                      _prefix, _index,
                      p2i(p), p2i(o),
                      BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(o)),
                      o->klass()->internal_name(), p2i(o->klass()));
      } else {
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT" dirty -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty)",
                      _prefix, _index,
                      p2i(p), p2i(o));
      }
    } else {
      tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class PrintAllRefsObjectClosure : public ObjectClosure {
  const char* _prefix;

public:
  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p)) {
      tty->print_cr("%s object "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT") refers to:",
                    _prefix, p2i(p),
                    BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(p)),
                    p->klass()->internal_name(), p2i(p->klass()));
      PrintAllRefsOopClosure cl(_prefix);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::print_all_refs(const char* prefix) {
  tty->print_cr("printing all references in the heap");
  tty->print_cr("root references:");

  ensure_parsability(false);

  PrintAllRefsOopClosure cl(prefix);
  roots_iterate(&cl);

  tty->print_cr("heap references:");
  PrintAllRefsObjectClosure cl2(prefix);
  object_iterate(&cl2);
}

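// Post-marking verifier: every reference reachable from a marked object must
// itself be marked, must not be forwarded, and must not point into the
// collection set.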
class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (! _heap->is_marked_complete(o)) {
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: "PTR_FORMAT": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_complete(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr "PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
        tty->print_cr("oops has forwardee: p: "PTR_FORMAT" (%s), o = "PTR_FORMAT" (%s), new-o: "PTR_FORMAT" (%s)",
                      p2i(p),
                      BOOL_TO_STR(_heap->in_collection_set(p)),
                      p2i(o),
                      BOOL_TO_STR(_heap->in_collection_set(o)),
                      p2i((HeapWord*) oopDesc::bs()->read_barrier(o)),
                      BOOL_TO_STR(_heap->in_collection_set(oopDesc::bs()->read_barrier(o))));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
      assert(! _heap->in_collection_set(o), "references must not point to dirty heap regions");
      assert(_heap->is_marked_complete(o), "live oops must be marked current");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::verify_heap_after_marking() {
  verify_heap_size_consistency();

  log_trace(gc)("verifying heap after marking");

  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);
  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}

void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  uint required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  uint index = r->region_number();

  assert(!r->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (log_is_enabled(Debug, gc, humongous)) {
      log_debug(gc, humongous)("reclaiming "UINT32_FORMAT" humongous regions for object of size: "SIZE_FORMAT" words", required_regions, size);
      ResourceMark rm;
      outputStream* out = Log(gc, humongous)::debug_stream();
      region->print_on(out);
    }

    region->recycle();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::RegionSizeBytes);
  }
}

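// Reclaims the regions of any humongous object whose start was not marked
// during the last complete marking.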
class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! heap->is_marked_complete(humongous_obj)) {
        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

#ifdef ASSERT
class CheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");

  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {
    recycle_dirty_regions();

    ensure_parsability(true);

    if (UseShenandoahMatrix) {
      if (PrintShenandoahMatrix) {
        outputStream* log = Log(gc)::info_stream();
        connection_matrix()->print_on(log);
      }
    }

    if (ShenandoahVerify || (UseShenandoahMatrix && VerifyShenandoahMatrix)) {
      verify_heap_reachable_at_safepoint();
    }

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }
#endif

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.

    {
      ShenandoahHeapLock lock(this);
      _collection_set->clear();
      _free_regions->clear();

      ShenandoahReclaimHumongousRegionsClosure reclaim;
      heap_region_iterate(&reclaim);

#ifdef ASSERT
      CheckCollectionSetClosure ccsc;
      _ordered_regions->heap_region_iterate(&ccsc);
#endif

      _shenandoah_policy->choose_collection_set(_collection_set);

      _shenandoah_policy->choose_free_set(_free_regions);
    }

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}

class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
    RetireTLABClosure cl(retire_tlabs);
    Threads::threads_do(&cl);
  }
}

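// Root closure used during evacuation: copies collection-set objects that are
// still in from-space and updates the root slot to point at the to-space copy.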
class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
#ifdef ASSERT
    else {
      // tty->print_cr("not updating root at: "PTR_FORMAT" with object: "PTR_FORMAT", is_in_heap: %s, is_in_cset: %s, is_marked: %s",
      //               p2i(p),
      //               p2i((HeapWord*) obj),
      //               BOOL_TO_STR(_heap->is_in(obj)),
      //               BOOL_TO_STR(_heap->in_cset_fast_test(obj)),
      //               BOOL_TO_STR(_heap->is_marked_complete(obj)));
    }
#endif
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

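// Used on the cancellation path: walks the same roots again with
// SCMUpdateRefsClosure, fixing any stale from-space root slots without
// evacuating anything new.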
class ShenandoahFixRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    SCMUpdateRefsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
  COMPILER2_PRESENT(DerivedPointerTable::clear());

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(false);
  }
#endif

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  if (cancelled_concgc()) {
    // If initial evacuation has been cancelled, we need to update all references
    // after all workers have finished. Otherwise we might run into the following problem:
    // GC thread 1 cannot allocate anymore, thus evacuation fails and leaves a from-space
    // ptr to object X; GC thread 2 evacuates the same object X to to-space, which leaves
    // a truly dangling from-space reference in the first root oop*. This must not happen.
    // clear() and update_pointers() must always be called in pairs, and cannot nest
    // with the clear()/update_pointers() above.
1325     COMPILER2_PRESENT(DerivedPointerTable::clear());
1326     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::evac_thread_roots);
1327     ShenandoahFixRootsTask update_roots_task(&rp);
1328     workers()->run_task(&update_roots_task);
1329     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1330   }
1331 
1332 #ifdef ASSERT
1333   if (ShenandoahVerifyReadsToFromSpace) {
1334     set_from_region_protection(true);
1335   }
1336 #endif
1337 
1338 #ifdef ASSERT
1339   {
1340     AssertToSpaceClosure cl;
1341     CodeBlobToOopClosure code_cl(&cl, !CodeBlobToOopClosure::FixRelocations);
1342     ShenandoahRootEvacuator rp(this, 1);
1343     rp.process_evacuate_roots(&cl, &code_cl, 0);
1344   }
1345 #endif
1346 }
1347 
1348 
1349 void ShenandoahHeap::do_evacuation() {
1350 
1351   parallel_evacuate();
1352 
1353   if (ShenandoahVerify && ! cancelled_concgc()) {
1354     VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
1355     if (Thread::current()->is_VM_thread()) {
1356       verify_after_evacuation.doit();
1357     } else {
1358       VMThread::execute(&verify_after_evacuation);
1359     }
1360   }
1361 
1362 }
1363 
1364 void ShenandoahHeap::parallel_evacuate() {
1365   log_develop_trace(gc)("starting parallel_evacuate");
1366 
1367   _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);
1368 
1369   if (log_is_enabled(Trace, gc, region)) {
1370     ResourceMark rm;
1371     outputStream *out = Log(gc, region)::trace_stream();
1372     out->print("Printing all available regions");
1373     print_heap_regions(out);
1374   }
1375 
1376   if (log_is_enabled(Trace, gc, cset)) {
1377     ResourceMark rm;
1378     outputStream *out = Log(gc, cset)::trace_stream();
1379     out->print("Printing collection set which contains "SIZE_FORMAT" regions:\n", _collection_set->count());
1380     _collection_set->print(out);
1381 
1382     out->print("Printing free set which contains "SIZE_FORMAT" regions:\n", _free_regions->count());
1383     _free_regions->print(out);
1384   }
1385 
1386   ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);
1387 
1388 
1389   workers()->run_task(&evacuationTask);
1390 
1391   if (log_is_enabled(Trace, gc, cset)) {
1392     ResourceMark rm;
1393     outputStream *out = Log(gc, cset)::trace_stream();
1394     out->print("Printing postgc collection set which contains "SIZE_FORMAT" regions:\n",
1395                _collection_set->count());
1396 
1397     _collection_set->print(out);
1398 
1399     out->print("Printing postgc free regions which contain "SIZE_FORMAT" free regions:\n",
1400                _free_regions->count());
1401     _free_regions->print(out);
1402 
1403   }
1404 
1405   if (log_is_enabled(Trace, gc, region)) {
1406     ResourceMark rm;
1407     outputStream *out = Log(gc, region)::trace_stream();
1408     out->print_cr("all regions after evacuation:");
1409     print_heap_regions(out);
1410   }
1411 
1412   _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);
1413 }
1414 
1415 class VerifyEvacuationClosure: public ExtendedOopClosure {
1416 private:
1417   ShenandoahHeap*  _heap;
1418   ShenandoahHeapRegion* _from_region;
1419 
1420 public:
1421   VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
1422     _heap(ShenandoahHeap::heap()), _from_region(from_region) { }
1423 private:
1424   template <class T>
1425   inline void do_oop_work(T* p) {
1426     oop heap_oop = oopDesc::load_decode_heap_oop(p);
1427     if (! oopDesc::is_null(heap_oop)) {
1428       guarantee(! _from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: "PTR_FORMAT, p2i((HeapWord*) heap_oop));
1429     }
1430   }
1431 
1432 public:
1433   void do_oop(oop* p)       {
1434     do_oop_work(p);
1435   }
1436 
1437   void do_oop(narrowOop* p) {
1438     do_oop_work(p);
1439   }
1440 
1441 };
1442 
1443 void ShenandoahHeap::roots_iterate(OopClosure* cl) {
1444 
1445   assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
1446 
1447   CodeBlobToOopClosure blobsCl(cl, false);
1448   CLDToOopClosure cldCl(cl);
1449 
1450   ClassLoaderDataGraph::clear_claimed_marks();
1451 
1452   ShenandoahRootProcessor rp(this, 1);
1453   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
1454 }
1455 
1456 void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
1457 
1458   VerifyEvacuationClosure rootsCl(from_region);
1459   roots_iterate(&rootsCl);
1460 
1461 }
1462 
1463 bool ShenandoahHeap::supports_tlab_allocation() const {
1464   return true;
1465 }
1466 
1467 
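// Estimate the space available for the next TLAB refill: the remaining space
// in the current free region if it is still usable, otherwise a full region,
// since the next TLAB will be taken from a fresh free region.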
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1469   size_t idx = _free_regions->current_index();
1470   ShenandoahHeapRegion* current = _free_regions->get(idx);
1471   if (current == NULL) {
1472     return 0;
1473   } else if (current->free() > MinTLABSize) {
1474     // Current region has enough space left, can use it.
1475     return current->free();
1476   } else {
1477     // No more space in current region, we will take next free region
1478     // on the next TLAB allocation.
1479     return ShenandoahHeapRegion::RegionSizeBytes;
1480   }
1481 }
1482 
1483 size_t ShenandoahHeap::max_tlab_size() const {
1484   return ShenandoahHeapRegion::RegionSizeBytes;
1485 }
1486 
1487 class ResizeGCLABClosure : public ThreadClosure {
1488 public:
1489   void do_thread(Thread* thread) {
1490     thread->gclab().resize();
1491   }
1492 };
1493 
1494 void ShenandoahHeap::resize_all_tlabs() {
1495   CollectedHeap::resize_all_tlabs();
1496 
1497   ResizeGCLABClosure cl;
1498   Threads::threads_do(&cl);
1499 }
1500 
1501 class AccumulateStatisticsGCLABClosure : public ThreadClosure {
1502 public:
1503   void do_thread(Thread* thread) {
1504     thread->gclab().accumulate_statistics();
1505     thread->gclab().initialize_statistics();
1506   }
1507 };
1508 
1509 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1510   AccumulateStatisticsGCLABClosure cl;
1511   Threads::threads_do(&cl);
1512 }
1513 
bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
1515   return true;
1516 }
1517 
1518 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1519   // Overridden to do nothing.
1520   return new_obj;
1521 }
1522 
bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1524   return true;
1525 }
1526 
1527 bool ShenandoahHeap::card_mark_must_follow_store() const {
1528   return false;
1529 }
1530 
1531 void ShenandoahHeap::collect(GCCause::Cause cause) {
1532   assert(cause != GCCause::_gc_locker, "no JNI critical callback");
1533   if (GCCause::is_user_requested_gc(cause)) {
1534     if (! DisableExplicitGC) {
1535       _concurrent_gc_thread->do_full_gc(cause);
1536     }
1537   } else if (cause == GCCause::_allocation_failure) {
1538     collector_policy()->set_should_clear_all_soft_refs(true);
1539     _concurrent_gc_thread->do_full_gc(cause);
1540   }
1541 }
1542 
1543 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  // Full GC is driven by ShenandoahConcurrentThread (see collect()); nothing to do here.
1545 }
1546 
1547 AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
1548   Unimplemented();
1549   return NULL;
1550 
1551 }
1552 
1553 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1554   return _shenandoah_policy;
1555 }
1556 
1557 
1558 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1559   Space* sp = heap_region_containing(addr);
1560   if (sp != NULL) {
1561     return sp->block_start(addr);
1562   }
1563   return NULL;
1564 }
1565 
1566 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1567   Space* sp = heap_region_containing(addr);
1568   assert(sp != NULL, "block_size of address outside of heap");
1569   return sp->block_size(addr);
1570 }
1571 
1572 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1573   Space* sp = heap_region_containing(addr);
1574   return sp->block_is_obj(addr);
1575 }
1576 
1577 jlong ShenandoahHeap::millis_since_last_gc() {
1578   return 0;
1579 }
1580 
1581 void ShenandoahHeap::prepare_for_verify() {
1582   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1583     ensure_parsability(false);
1584   }
1585 }
1586 
1587 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1588   workers()->print_worker_threads_on(st);
1589 }
1590 
1591 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1592   workers()->threads_do(tcl);
1593 }
1594 
1595 void ShenandoahHeap::print_tracing_info() const {
1596   if (log_is_enabled(Info, gc, stats)) {
1597     ResourceMark rm;
1598     outputStream* out = Log(gc, stats)::info_stream();
1599     _shenandoah_policy->print_tracing_info(out);
1600   }
1601 }
1602 
1603 class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
1604 private:
1605   ShenandoahHeap*  _heap;
1606   VerifyOption     _vo;
1607   bool             _failures;
1608 public:
1609   // _vo == UsePrevMarking -> use "prev" marking information,
1610   // _vo == UseNextMarking -> use "next" marking information,
1611   // _vo == UseMarkWord    -> use mark word from object header.
1612   ShenandoahVerifyRootsClosure(VerifyOption vo) :
1613     _heap(ShenandoahHeap::heap()),
1614     _vo(vo),
1615     _failures(false) { }
1616 
1617   bool failures() { return _failures; }
1618 
1619 private:
1620   template <class T>
1621   inline void do_oop_work(T* p) {
1622     oop obj = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
      _failures = true;
      { // Just for debugging.
        tty->print_cr("Root location "PTR_FORMAT" verified "PTR_FORMAT,
                      p2i(p), p2i((void*) obj));
      }
    }
1630     guarantee(obj->is_oop_or_null(), "is oop or null");
1631   }
1632 
1633 public:
1634   void do_oop(oop* p)       {
1635     do_oop_work(p);
1636   }
1637 
1638   void do_oop(narrowOop* p) {
1639     do_oop_work(p);
1640   }
1641 
1642 };
1643 
1644 class ShenandoahVerifyHeapClosure: public ObjectClosure {
1645 private:
1646   ShenandoahVerifyRootsClosure _rootsCl;
1647 public:
  ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
    _rootsCl(rc) {}
1650 
1651   void do_object(oop p) {
1652     _rootsCl.do_oop(&p);
1653   }
1654 };
1655 
1656 class ShenandoahVerifyKlassClosure: public KlassClosure {
1657   OopClosure *_oop_closure;
1658  public:
1659   ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {}
1660   void do_klass(Klass* k) {
1661     k->oops_do(_oop_closure);
1662   }
1663 };
1664 
1665 void ShenandoahHeap::verify(VerifyOption vo) {
1666   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1667 
1668     ShenandoahVerifyRootsClosure rootsCl(vo);
1669 
1670     assert(Thread::current()->is_VM_thread(),
1671            "Expected to be executed serially by the VM thread at this point");
1672 
1673     roots_iterate(&rootsCl);
1674 
1675     bool failures = rootsCl.failures();
1676     log_trace(gc)("verify failures: %s", BOOL_TO_STR(failures));
1677 
1678     ShenandoahVerifyHeapClosure heapCl(rootsCl);
1679 
1680     object_iterate(&heapCl);
1681     // TODO: Implement rest of it.
1682   } else {
1683     tty->print("(SKIPPING roots, heapRegions, remset) ");
1684   }
1685 }
1686 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1687   return _free_regions->capacity();
1688 }
1689 
1690 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
1691   ObjectClosure* _cl;
1692 public:
1693   ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1694   bool doHeapRegion(ShenandoahHeapRegion* r) {
1695     ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
1696     return false;
1697   }
1698 };
1699 
1700 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1701   ShenandoahIterateObjectClosureRegionClosure blk(cl);
1702   heap_region_iterate(&blk, false, true);
1703 }
1704 
1705 class ShenandoahSafeObjectIterateAdjustPtrsClosure : public MetadataAwareOopClosure {
1706 private:
1707   ShenandoahHeap* _heap;
1708 
1709 public:
1710   ShenandoahSafeObjectIterateAdjustPtrsClosure() : _heap(ShenandoahHeap::heap()) {}
1711 
1712 private:
1713   template <class T>
1714   inline void do_oop_work(T* p) {
1715     T o = oopDesc::load_heap_oop(p);
1716     if (!oopDesc::is_null(o)) {
1717       oop obj = oopDesc::decode_heap_oop_not_null(o);
1718       oopDesc::encode_store_heap_oop(p, BrooksPointer::forwardee(obj));
1719     }
1720   }
1721 public:
1722   void do_oop(oop* p) {
1723     do_oop_work(p);
1724   }
1725   void do_oop(narrowOop* p) {
1726     do_oop_work(p);
1727   }
1728 };
1729 
1730 class ShenandoahSafeObjectIterateAndUpdate : public ObjectClosure {
1731 private:
1732   ObjectClosure* _cl;
1733 public:
1734   ShenandoahSafeObjectIterateAndUpdate(ObjectClosure *cl) : _cl(cl) {}
1735 
1736   virtual void do_object(oop obj) {
1737     assert (oopDesc::unsafe_equals(obj, BrooksPointer::forwardee(obj)),
1738             "avoid double-counting: only non-forwarded objects here");
1739 
1740     // Fix up the ptrs.
1741     ShenandoahSafeObjectIterateAdjustPtrsClosure adjust_ptrs;
1742     obj->oop_iterate(&adjust_ptrs);
1743 
    // Can report the object to the closure now:
1745     _cl->do_object(obj);
1746   }
1747 };
1748 
1749 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1750   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1751 
  // Safe iteration must visit only objects with correct references.
  // This is why we skip collection-set ("dirty") regions, which may hold
  // stale copies of objects, and fix up the pointers in the objects we return.
1755 
1756   ShenandoahSafeObjectIterateAndUpdate safe_cl(cl);
1757   ShenandoahIterateObjectClosureRegionClosure blk(&safe_cl);
1758   heap_region_iterate(&blk,
1759                       /* skip_dirty_regions = */ true,
1760                       /* skip_humongous_continuations = */ true);
1761 
1762   _need_update_refs = false; // already updated the references
1763 }
1764 
1765 // Apply blk->doHeapRegion() on all committed regions in address order,
1766 // terminating the iteration early if doHeapRegion() returns true.
1767 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
1768   for (size_t i = 0; i < _num_regions; i++) {
1769     ShenandoahHeapRegion* current  = _ordered_regions->get(i);
1770     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1771       continue;
1772     }
1773     if (skip_dirty_regions && in_collection_set(current)) {
1774       continue;
1775     }
1776     if (blk->doHeapRegion(current)) {
1777       return;
1778     }
1779   }
1780 }
1781 
1782 class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
1783   ShenandoahHeap* sh;
1784 public:
1785   ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
1786 
1787   bool doHeapRegion(ShenandoahHeapRegion* r) {
1788     r->clear_live_data();
1789     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1790     return false;
1791   }
1792 };
1793 
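// Initial-mark preparation: accumulate TLAB statistics, retire TLABs to make
// the heap parsable, reset per-region liveness and next-top-at-mark-start,
// clear the connection matrix (when in use), and finally scan the root set
// to seed the concurrent marking queues.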
1794 void ShenandoahHeap::start_concurrent_marking() {
1795 
1796   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
1797   accumulate_statistics_all_tlabs();
1798   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);
1799 
1800   set_concurrent_mark_in_progress(true);
1801   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1802   if (UseTLAB) {
1803     shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
1804     ensure_parsability(true);
1805     shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
1806   }
1807 
1808   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1809   _used_start_gc = used();
1810 
1811 #ifdef ASSERT
1812   if (ShenandoahDumpHeapBeforeConcurrentMark) {
1813     ensure_parsability(false);
1814     print_all_refs("pre-mark");
1815   }
1816 #endif
1817 
1818   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
1819   ClearLivenessClosure clc(this);
1820   heap_region_iterate(&clc);
1821   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);
1822 
1823   if (UseShenandoahMatrix) {
1824     connection_matrix()->clear_all();
1825   }
1826   // print_all_refs("pre -mark");
1827 
1828   // oopDesc::_debug = true;
1829 
1830   // Make above changes visible to worker threads
1831   OrderAccess::fence();
1832 
1833   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
1834   concurrentMark()->init_mark_roots();
1835   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);
1836 
1837   //  print_all_refs("pre-mark2");
1838 }
1839 
1840 class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
1841 
1842   ShenandoahHeap* _sh;
1843 
1844 public:
1845   VerifyAfterEvacuationClosure() : _sh ( ShenandoahHeap::heap() ) {}
1846 
1847   template<class T> void do_oop_nv(T* p) {
1848     T heap_oop = oopDesc::load_heap_oop(p);
1849     if (!oopDesc::is_null(heap_oop)) {
1850       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1851       guarantee(_sh->in_collection_set(obj) == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1852                 "forwarded objects can only exist in dirty (from-space) regions is_dirty: %s, is_forwarded: %s obj-klass: %s, marked: %s",
1853                 BOOL_TO_STR(_sh->in_collection_set(obj)),
1854                 BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1855                 obj->klass()->external_name(),
1856                 BOOL_TO_STR(_sh->is_marked_complete(obj))
1857                 );
1858       obj = oopDesc::bs()->read_barrier(obj);
1859       guarantee(! _sh->in_collection_set(obj), "forwarded oops must not point to dirty regions");
1860       guarantee(obj->is_oop(), "is_oop");
1861       guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
1862     }
1863   }
1864 
1865   void do_oop(oop* p)       { do_oop_nv(p); }
1866   void do_oop(narrowOop* p) { do_oop_nv(p); }
1867 
1868 };
1869 
1870 void ShenandoahHeap::verify_heap_after_evacuation() {
1871 
1872   verify_heap_size_consistency();
1873 
1874   ensure_parsability(false);
1875 
1876   VerifyAfterEvacuationClosure cl;
1877   roots_iterate(&cl);
1878 
1879   ObjectToOopClosure objs(&cl);
1880   object_iterate(&objs);
1881 
1882 }
1883 
1884 class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
1885 public:
1886   bool doHeapRegion(ShenandoahHeapRegion* r) {
1887     assert(! ShenandoahHeap::heap()->in_collection_set(r), "no region must be in collection set");
1888     return false;
1889   }
1890 };
1891 
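// Marking has completed: the "next" bitmap and top-at-mark-start data now
// describe the finished marking, so they become the "complete" versions,
// while the previous "complete" ones are recycled for the next cycle.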
1892 void ShenandoahHeap::swap_mark_bitmaps() {
1893   // Swap bitmaps.
1894   CMBitMap* tmp1 = _complete_mark_bit_map;
1895   _complete_mark_bit_map = _next_mark_bit_map;
1896   _next_mark_bit_map = tmp1;
1897 
1898   // Swap top-at-mark-start pointers
1899   HeapWord** tmp2 = _complete_top_at_mark_starts;
1900   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1901   _next_top_at_mark_starts = tmp2;
1902 
1903   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1904   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1905   _next_top_at_mark_starts_base = tmp3;
1906 }
1907 
1908 class VerifyReachableHeapClosure : public ExtendedOopClosure {
1909 private:
1910   SCMObjToScanQueue* _queue;
1911   ShenandoahHeap* _heap;
1912   CMBitMap* _map;
1913   bool _check_matrix;
1914   oop _obj;
1915 public:
  VerifyReachableHeapClosure(SCMObjToScanQueue* queue, CMBitMap* map, bool check_matrix) :
          _queue(queue), _heap(ShenandoahHeap::heap()), _map(map), _check_matrix(check_matrix), _obj(NULL) {}
1918   template <class T>
1919   void do_oop_work(T* p) {
1920     T o = oopDesc::load_heap_oop(p);
1921     if (!oopDesc::is_null(o)) {
1922       oop obj = oopDesc::decode_heap_oop_not_null(o);
1923       guarantee(check_obj_alignment(obj), "sanity");
1924 
1925       guarantee(!oopDesc::is_null(obj), "sanity");
1926       guarantee(_heap->is_in(obj), "sanity");
1927 
1928       oop forw = BrooksPointer::forwardee(obj);
1929       guarantee(!oopDesc::is_null(forw), "sanity");
1930       guarantee(_heap->is_in(forw), "sanity");
1931 
1932       guarantee(oopDesc::unsafe_equals(obj, forw), "should not be forwarded");
1933 
1934       if (_check_matrix) {
1935         uint from_idx = _heap->heap_region_index_containing(p);
1936         uint to_idx = _heap->heap_region_index_containing(obj);
1937         if (!_heap->connection_matrix()->is_connected(from_idx, to_idx)) {
1938           tty->print_cr("from-obj: ");
1939           _obj->print_on(tty);
1940           tty->print_cr("to-obj:");
1941           obj->print_on(tty);
1942           tty->print_cr("from-obj allocated after mark: %s", BOOL_TO_STR(_heap->allocated_after_complete_mark_start((HeapWord*) _obj)));
1943           tty->print_cr("to-obj allocated after mark: %s", BOOL_TO_STR(_heap->allocated_after_complete_mark_start((HeapWord*) obj)));
1944           tty->print_cr("from-obj marked: %s", BOOL_TO_STR(_heap->is_marked_complete(_obj)));
1945           tty->print_cr("to-obj marked: %s", BOOL_TO_STR(_heap->is_marked_complete(obj)));
1946           tty->print_cr("from-idx: %u, to-idx: %u", from_idx, to_idx);
1947 
1948           oop fwd_from = BrooksPointer::forwardee(_obj);
1949           oop fwd_to = BrooksPointer::forwardee(obj);
1950           tty->print_cr("from-obj forwardee: " PTR_FORMAT, p2i(fwd_from));
1951           tty->print_cr("to-obj forwardee: " PTR_FORMAT, p2i(fwd_to));
1952           tty->print_cr("forward(from-obj) marked: %s", BOOL_TO_STR(_heap->is_marked_complete(fwd_from)));
1953           tty->print_cr("forward(to-obj) marked: %s", BOOL_TO_STR(_heap->is_marked_complete(fwd_to)));
1954           uint fwd_from_idx = _heap->heap_region_index_containing(fwd_from);
1955           uint fwd_to_idx = _heap->heap_region_index_containing(fwd_to);
1956           tty->print_cr("forward(from-idx): %u, forward(to-idx): %u", fwd_from_idx, fwd_to_idx);
1957           tty->print_cr("forward(from) connected with forward(to)? %s", BOOL_TO_STR(_heap->connection_matrix()->is_connected(fwd_from_idx, fwd_to_idx)));
1958         }
        guarantee(oopDesc::unsafe_equals(ShenandoahBarrierSet::resolve_oop_static_not_null(obj), obj), "must not be forwarded");
1960         guarantee(_heap->connection_matrix()->is_connected(from_idx, to_idx), "must be connected");
1961       }
1962 
1963       if (_map->parMark((HeapWord*) obj)) {
1964         _queue->push(SCMTask(obj));
1965       }
1966     }
1967   }
1968 
1969   void do_oop(oop* p) { do_oop_work(p); }
1970   void do_oop(narrowOop* p) { do_oop_work(p); }
1971   void set_obj(oop o) { _obj = o; }
1972 };
1973 
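// Verify all objects reachable from the roots, using a temporary mark bitmap
// and a single scan queue as the marking wavefront: scan the roots first,
// then drain the queue, checking every visited reference (and optionally the
// connection matrix) along the way.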
1974 void ShenandoahHeap::verify_heap_reachable_at_safepoint() {
1975   guarantee(SafepointSynchronize::is_at_safepoint(), "only when nothing else happens");
1976 
1977   OrderAccess::fence();
1978   ensure_parsability(false);
1979 
1980   // Allocate temporary bitmap for storing marking wavefront:
1981   ReservedSpace bm(_bitmap_size, os::vm_page_size());
1982   os::commit_memory_or_exit(bm.base(), bm.size(), false, "couldn't allocate verification bitmap");
1983   MemTracker::record_virtual_memory_type(bm.base(), mtGC);
1984   MemRegion verify_bitmap_region = MemRegion((HeapWord*) bm.base(), bm.size() / HeapWordSize);
1985 
1986   CMBitMap* _verification_bit_map = new CMBitMap();
1987   _verification_bit_map->initialize(_heap_region, verify_bitmap_region);
1988   MemRegion mr = MemRegion(_verification_bit_map->startWord(), _verification_bit_map->endWord());
1989   _verification_bit_map->clear_range_large(mr);
1990 
1991   // Initialize a single queue
1992   SCMObjToScanQueue* q = new SCMObjToScanQueue();
1993   q->initialize();
1994 
1995   // Scan root set
1996   ClassLoaderDataGraph::clear_claimed_marks();
1997   ShenandoahRootProcessor rp(this, 1);
1998 
1999   {
2000     VerifyReachableHeapClosure cl(q, _verification_bit_map, false);
2001     CLDToOopClosure cld_cl(&cl);
2002     CodeBlobToOopClosure code_cl(&cl, ! CodeBlobToOopClosure::FixRelocations);
2003     rp.process_all_roots(&cl, &cl, &cld_cl, &code_cl, 0);
2004   }
2005 
2006   // Finish the scan
2007   {
2008     VerifyReachableHeapClosure cl(q, _verification_bit_map, UseShenandoahMatrix && VerifyShenandoahMatrix);
2009     SCMTask task;
2010     while ((q->pop_buffer(task) ||
2011             q->pop_local(task) ||
2012             q->pop_overflow(task))) {
2013       oop obj = task.obj();
2014       assert(!oopDesc::is_null(obj), "must not be null");
2015       cl.set_obj(obj);
2016       obj->oop_iterate(&cl);
2017     }
2018   }
2019 
  // Clean up.
  delete q;
  delete _verification_bit_map;
  os::uncommit_memory(bm.base(), bm.size());
  MemTracker::record_free(bm.base());
2024 }
2025 
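// Final-mark bookkeeping: when marking was not cancelled, publish the marking
// results; in either case clear the marking-in-progress flag, which also
// deactivates the SATB queues of all Java threads.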
2026 void ShenandoahHeap::stop_concurrent_marking() {
2027   assert(concurrent_mark_in_progress(), "How else could we get here?");
2028   if (! cancelled_concgc()) {
    // Marking finished without cancellation: publish the marking results by
    // swapping the bitmaps, and clear the update-refs flag; a cancelled cycle
    // keeps the old state because its "next" marking data is incomplete.
2031     set_need_update_refs(false);
2032     swap_mark_bitmaps();
2033   }
2034   set_concurrent_mark_in_progress(false);
2035 
2036   if (log_is_enabled(Trace, gc, region)) {
2037     ResourceMark rm;
2038     outputStream* out = Log(gc, region)::trace_stream();
2039     print_heap_regions(out);
2040   }
2041 
2042 }
2043 
2044 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
2045   _concurrent_mark_in_progress = in_progress ? 1 : 0;
2046   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
2047 }
2048 
2049 void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
2050   // Note: it is important to first release the _evacuation_in_progress flag here,
2051   // so that Java threads can get out of oom_during_evacuation() and reach a safepoint,
2052   // in case a VM task is pending.
2053   set_evacuation_in_progress(in_progress);
2054   MutexLocker mu(Threads_lock);
2055   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
2056 }
2057 
2058 void ShenandoahHeap::set_evacuation_in_progress_at_safepoint(bool in_progress) {
2059   assert(SafepointSynchronize::is_at_safepoint(), "Only call this at safepoint");
2060   set_evacuation_in_progress(in_progress);
2061   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
2062 }
2063 
2064 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2065   _evacuation_in_progress = in_progress ? 1 : 0;
2066   OrderAccess::fence();
2067 }
2068 
void ShenandoahHeap::verify_copy(oop p, oop c) {
  assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
  assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
  if (p->klass() != c->klass()) {
    print_heap_regions();
  }
  assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
  assert(p->size() == c->size(), "verify size");
  // Object may have been locked between copy and verification:
  //    assert(p->mark() == c->mark(), "verify mark");
  assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
}
2081 
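// Called when evacuation fails to allocate to-space: cancel concurrent GC and
// schedule a full GC. A calling Java (non-GC) thread then parks here until
// evacuation has been turned off.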
2082 void ShenandoahHeap::oom_during_evacuation() {
2083   log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
2084                         Thread::current()->osthread()->thread_id());
2085 
2086   // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
2087   collector_policy()->set_should_clear_all_soft_refs(true);
2088   concurrent_thread()->try_set_full_gc();
2089   cancel_concgc(_oom_evacuation);
2090 
2091   if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
2092     assert(! Threads_lock->owned_by_self()
2093            || SafepointSynchronize::is_at_safepoint(), "must not hold Threads_lock here");
2094     log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation finishes.");
2095     while (_evacuation_in_progress) { // wait.
2096       Thread::current()->_ParkEvent->park(1);
2097     }
2098   }
2099 
2100 }
2101 
HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
  // Reserve space for the Brooks pointer in front of the object and
  // initialize it to point back at the object itself.
  HeapWord* result = obj + BrooksPointer::word_size();
  BrooksPointer::initialize(oop(result));
  return result;
}
2108 
2109 uint ShenandoahHeap::oop_extra_words() {
2110   return BrooksPointer::word_size();
2111 }
2112 
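// Expand the heap by num_regions regions: commit the backing storage, create
// the new region objects, and wire them into the ordered region set, the
// cset fast-test table, the top-at-mark-start arrays and the free set.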
2113 void ShenandoahHeap::grow_heap_by(size_t num_regions) {
2114   size_t base = _num_regions;
2115   ensure_new_regions(num_regions);
2116   for (size_t i = 0; i < num_regions; i++) {
2117     size_t new_region_index = i + base;
2118     HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * new_region_index;
2119     ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion(this, start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, new_region_index);
2120 
2121     if (log_is_enabled(Trace, gc, region)) {
2122       ResourceMark rm;
2123       outputStream* out = Log(gc, region)::trace_stream();
2124       out->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
2125       new_region->print_on(out);
2126     }
2127 
2128     assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
2129     _ordered_regions->add_region(new_region);
2130     _in_cset_fast_test_base[new_region_index] = false; // Not in cset
2131     _next_top_at_mark_starts_base[new_region_index] = new_region->bottom();
2132     _complete_top_at_mark_starts_base[new_region_index] = new_region->bottom();
2133 
2134     _free_regions->add_region(new_region);
2135   }
2136 }
2137 
2138 void ShenandoahHeap::ensure_new_regions(size_t new_regions) {
2139 
2140   size_t num_regions = _num_regions;
2141   size_t new_num_regions = num_regions + new_regions;
2142   assert(new_num_regions <= _max_regions, "we checked this earlier");
2143 
2144   size_t expand_size = new_regions * ShenandoahHeapRegion::RegionSizeBytes;
2145   log_trace(gc, region)("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
2146   bool success = _storage.expand_by(expand_size, ShenandoahAlwaysPreTouch);
2147   assert(success, "should always be able to expand by requested size");
2148 
2149   _num_regions = new_num_regions;
2150 
2151 }
2152 
2153 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
2154   _heap(ShenandoahHeap::heap_no_check()) {
2155 }
2156 
2157 void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
2158   _heap = heap;
2159 }
2160 
2161 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
2162 
2163   assert(_heap != NULL, "sanity");
2164   obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
2165 #ifdef ASSERT
2166   if (_heap->concurrent_mark_in_progress()) {
2167     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
2168   }
2169 #endif
2170   assert(!oopDesc::is_null(obj), "null");
2171   return _heap->is_marked_next(obj);
2172 }
2173 
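// Set up the ReferenceProcessor used for discovering and processing
// soft/weak/final/phantom references, with the forwarding-aware is-alive
// closure.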
2174 void ShenandoahHeap::ref_processing_init() {
2175   MemRegion mr = reserved_region();
2176 
2177   isAlive.init(ShenandoahHeap::heap());
2178   assert(_max_workers > 0, "Sanity");
2179 
2180   _ref_processor =
2181     new ReferenceProcessor(mr,    // span
2182                            ParallelRefProcEnabled,
2183                            // mt processing
2184                            _max_workers,
2185                            // degree of mt processing
2186                            true,
2187                            // mt discovery
2188                            _max_workers,
2189                            // degree of mt discovery
2190                            false,
2191                            // Reference discovery is not atomic
2192                            &isAlive);
2193 }
2194 
2195 #ifdef ASSERT
2196 void ShenandoahHeap::set_from_region_protection(bool protect) {
  for (size_t i = 0; i < _num_regions; i++) {
2198     ShenandoahHeapRegion* region = _ordered_regions->get(i);
2199     if (region != NULL && in_collection_set(region)) {
2200       if (protect) {
2201         region->memProtectionOn();
2202       } else {
2203         region->memProtectionOff();
2204       }
2205     }
2206   }
2207 }
2208 #endif
2209 
2210 size_t ShenandoahHeap::num_regions() {
2211   return _num_regions;
2212 }
2213 
2214 size_t ShenandoahHeap::max_regions() {
2215   return _max_regions;
2216 }
2217 
2218 GCTracer* ShenandoahHeap::tracer() {
2219   return shenandoahPolicy()->tracer();
2220 }
2221 
2222 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2223   return _free_regions->used();
2224 }
2225 
2226 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
2227   if (try_cancel_concgc()) {
2228     log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
2229     _shenandoah_policy->report_concgc_cancelled();
2230   }
2231 }
2232 
2233 void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
2234   if (try_cancel_concgc()) {
2235     log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
2236     _shenandoah_policy->report_concgc_cancelled();
2237   }
2238 }
2239 
2240 const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
2241   switch (cause) {
2242     case _oom_evacuation:
2243       return "Out of memory for evacuation";
2244     case _vm_stop:
2245       return "Stopping VM";
2246     default:
2247       return "Unknown";
2248   }
2249 }
2250 
2251 void ShenandoahHeap::clear_cancelled_concgc() {
2252   set_cancelled_concgc(false);
2253 }
2254 
2255 uint ShenandoahHeap::max_workers() {
2256   return _max_workers;
2257 }
2258 
2259 void ShenandoahHeap::stop() {
2260   // The shutdown sequence should be able to terminate when GC is running.
2261 
2262   // Step 1. Notify control thread that we are in shutdown.
2263   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2264   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2265   _concurrent_gc_thread->prepare_for_graceful_shutdown();
2266 
2267   // Step 2. Notify GC workers that we are cancelling GC.
2268   cancel_concgc(_vm_stop);
2269 
2270   // Step 3. Wait until GC worker exits normally.
2271   _concurrent_gc_thread->stop();
2272 }
2273 
2274 void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) {
2275 
2276   StringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols);
2277   workers()->run_task(&shenandoah_unlink_task);
2278 
2279   //  if (G1StringDedup::is_enabled()) {
2280   //    G1StringDedup::unlink(is_alive);
2281   //  }
2282 }
2283 
2284 void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
2285   _need_update_refs = need_update_refs;
2286 }
2287 
// FIXME: this should live in ShenandoahHeapRegionSet.
2289 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2290   size_t region_idx = r->region_number() + 1;
2291   ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
2292   guarantee(next->region_number() == region_idx, "region number must match");
2293   while (next->is_humongous()) {
2294     region_idx = next->region_number() + 1;
2295     next = _ordered_regions->get(region_idx);
2296     guarantee(next->region_number() == region_idx, "region number must match");
2297   }
2298   return next;
2299 }
2300 
2301 void ShenandoahHeap::set_region_in_collection_set(size_t region_index, bool b) {
2302   _in_cset_fast_test_base[region_index] = b;
2303 }
2304 
2305 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2306   return _monitoring_support;
2307 }
2308 
2309 CMBitMap* ShenandoahHeap::complete_mark_bit_map() {
2310   return _complete_mark_bit_map;
2311 }
2312 
2313 CMBitMap* ShenandoahHeap::next_mark_bit_map() {
2314   return _next_mark_bit_map;
2315 }
2316 
2317 void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
2318   _free_regions->add_region(r);
2319 }
2320 
2321 void ShenandoahHeap::clear_free_regions() {
2322   _free_regions->clear();
2323 }
2324 
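// Addresses of GC state exported for code that reads them directly rather
// than through an accessor, such as generated barrier fast paths.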
2325 address ShenandoahHeap::in_cset_fast_test_addr() {
2326   return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
2327 }
2328 
2329 address ShenandoahHeap::cancelled_concgc_addr() {
2330   return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
2331 }
2332 
2333 void ShenandoahHeap::clear_cset_fast_test() {
2334   assert(_in_cset_fast_test_base != NULL, "sanity");
2335   memset(_in_cset_fast_test_base, false,
2336          _in_cset_fast_test_length * sizeof(bool));
2337 }
2338 
2339 size_t ShenandoahHeap::conservative_max_heap_alignment() {
2340   return ShenandoahMaxRegionSize;
2341 }
2342 
2343 size_t ShenandoahHeap::bytes_allocated_since_cm() {
2344   return _bytes_allocated_since_cm;
2345 }
2346 
2347 void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
2348   _bytes_allocated_since_cm = bytes;
2349 }
2350 
2351 size_t ShenandoahHeap::max_allocated_gc() {
2352   return _max_allocated_gc;
2353 }
2354 
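// Top-at-mark-start (TAMS) accessors. The arrays are indexed by region,
// derived from the region base address; objects allocated above TAMS after
// marking started are treated as implicitly marked.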
2355 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2356   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2357   _next_top_at_mark_starts[index] = addr;
2358 }
2359 
2360 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2361   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2362   return _next_top_at_mark_starts[index];
2363 }
2364 
2365 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2366   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2367   _complete_top_at_mark_starts[index] = addr;
2368 }
2369 
2370 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2371   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2372   return _complete_top_at_mark_starts[index];
2373 }
2374 
2375 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2376   _full_gc_in_progress = in_progress;
2377 }
2378 
2379 bool ShenandoahHeap::is_full_gc_in_progress() const {
2380   return _full_gc_in_progress;
2381 }
2382 
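// Updates the oops embedded in an nmethod to their to-space copies: any
// forwarded oop encountered during registration is resolved via the write
// barrier and stored back.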
2383 class NMethodOopInitializer : public OopClosure {
2384 private:
2385   ShenandoahHeap* _heap;
2386 public:
2387   NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
2388   }
2389 
2390 private:
2391   template <class T>
2392   inline void do_oop_work(T* p) {
2393     T o = oopDesc::load_heap_oop(p);
2394     if (! oopDesc::is_null(o)) {
2395       oop obj1 = oopDesc::decode_heap_oop_not_null(o);
2396       oop obj2 = oopDesc::bs()->write_barrier(obj1);
2397       if (! oopDesc::unsafe_equals(obj1, obj2)) {
2398         oopDesc::encode_store_heap_oop(p, obj2);
2399       }
2400     }
2401   }
2402 
2403 public:
2404   void do_oop(oop* o) {
2405     do_oop_work(o);
2406   }
2407   void do_oop(narrowOop* o) {
2408     do_oop_work(o);
2409   }
2410 };
2411 
2412 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2413   NMethodOopInitializer init;
2414   nm->oops_do(&init);
2415   nm->fix_oop_relocations();
2416 }
2417 
2418 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2419 }
2420 
2421 void ShenandoahHeap::pin_object(oop o) {
2422   heap_region_containing(o)->pin();
2423 }
2424 
2425 void ShenandoahHeap::unpin_object(oop o) {
2426   heap_region_containing(o)->unpin();
2427 }
2428 
2429 
2430 GCTimer* ShenandoahHeap::gc_timer() const {
2431   return _gc_timer;
2432 }
2433 
2434 class RecordAllRefsOopClosure: public ExtendedOopClosure {
2435 private:
  int* _matrix;
  int _x;
  size_t _num_regions;
  oop _p;

public:
  RecordAllRefsOopClosure(int* matrix, int x, size_t num_regions, oop p) :
    _matrix(matrix), _x(x), _num_regions(num_regions), _p(p) {}
2444 
2445   template <class T>
2446   void do_oop_work(T* p) {
2447     oop o = oopDesc::load_decode_heap_oop(p);
2448     if (o != NULL) {
2449       if (ShenandoahHeap::heap()->is_in(o) && o->is_oop() ) {
        int y = (int) ShenandoahHeap::heap()->heap_region_containing(o)->region_number();
2451         _matrix[_x * _num_regions + y]++;
2452       }
2453     }
2454   }
2455   void do_oop(oop* p) {
2456     do_oop_work(p);
2457   }
2458 
2459   void do_oop(narrowOop* p) {
2460     do_oop_work(p);
2461   }
2462 
2463 };
2464 
2465 class RecordAllRefsObjectClosure : public ObjectClosure {
2466   int *_matrix;
2467   size_t _num_regions;
2468 
2469 public:
2470   RecordAllRefsObjectClosure(int *matrix, size_t num_regions) :
2471     _matrix(matrix), _num_regions(num_regions) {}
2472 
2473   void do_object(oop p) {
2474     if (ShenandoahHeap::heap()->is_in(p) && ShenandoahHeap::heap()->is_marked_next(p)  && p->is_oop()) {
      int x = (int) ShenandoahHeap::heap()->heap_region_containing(p)->region_number();
2476       RecordAllRefsOopClosure cl(_matrix, x, _num_regions, p);
2477       p->oop_iterate(&cl);
2478     }
2479   }
2480 };
2481 void ShenandoahHeap::calculate_matrix(int* connections) {
2482   log_develop_trace(gc)("calculating matrix");
2483   ensure_parsability(false);
  int num = (int) num_regions();
2485 
2486   for (int i = 0; i < num; i++) {
2487     for (int j = 0; j < num; j++) {
2488       connections[i * num + j] = 0;
2489     }
2490   }
2491 
2492   RecordAllRefsOopClosure cl(connections, 0, num, NULL);
2493   roots_iterate(&cl);
2494 
2495   RecordAllRefsObjectClosure cl2(connections, num);
2496   object_iterate(&cl2);
2497 
2498 }
2499 
2500 void ShenandoahHeap::print_matrix(int* connections) {
  int num = (int) num_regions();
  int cs_regions = 0;
  int referenced = 0;

  for (int i = 0; i < num; i++) {
    int numReferencedRegions = 0;
    int numReferencedByRegions = 0;

    for (int j = 0; j < num; j++) {
      if (connections[i * num + j] > 0)
        numReferencedRegions++;

      if (connections[j * num + i] > 0)
        numReferencedByRegions++;
    }

    // Count each region once, not once per matrix cell.
    cs_regions++;
    referenced += numReferencedByRegions;
2521 
2522     if (ShenandoahHeap::heap()->regions()->get(i)->has_live()) {
2523       tty->print("Region %d is referenced by %d regions {",
2524                  i, numReferencedByRegions);
2525       int col_count = 0;
2526       for (int j = 0; j < num; j++) {
2527         int foo = connections[j * num + i];
2528         if (foo > 0) {
2529           col_count++;
2530           if ((col_count % 10) == 0)
2531             tty->print("\n");
2532           tty->print("%d(%d), ", j,foo);
2533         }
2534       }
2535       tty->print("} \n");
2536     }
2537   }
2538 
  double avg = (double) referenced / (double) cs_regions;
  tty->print("Average number of regions scanned per region = %f\n", avg);
2541 }
2542 
2543 class ShenandoahCountGarbageClosure : public ShenandoahHeapRegionClosure {
2544 private:
2545   size_t _garbage;
2546 public:
2547   ShenandoahCountGarbageClosure() : _garbage(0) {
2548   }
2549 
2550   bool doHeapRegion(ShenandoahHeapRegion* r) {
2551     if (! r->is_humongous() && ! r->is_pinned() && ! r->in_collection_set()) {
2552       _garbage += r->garbage();
2553     }
2554     return false;
2555   }
2556 
2557   size_t garbage() {
2558     return _garbage;
2559   }
2560 };
2561 
2562 size_t ShenandoahHeap::garbage() {
2563   ShenandoahCountGarbageClosure cl;
2564   heap_region_iterate(&cl);
2565   return cl.garbage();
2566 }
2567 
2568 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() {
2569   return _connection_matrix;
2570 }
2571 
2572 ShenandoahPartialGC* ShenandoahHeap::partial_gc() {
2573   return _partial_gc;
2574 }
2575 
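// Run a partial collection: prepare the partial GC under the heap lock, then
// let it do the actual collection work.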
2576 void ShenandoahHeap::do_partial_collection() {
2577   {
2578     ShenandoahHeapLock lock(this);
2579     partial_gc()->prepare();
2580   }
2581   partial_gc()->do_partial_collection();
2582 }
2583 
2584 #ifdef ASSERT
2585 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2586   assert(_heap_lock == locked, "must be locked");
2587   assert(_heap_lock_owner == Thread::current(), "must be owned by current thread");
2588 }
2589 
2590 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2591   Thread* thr = Thread::current();
  assert((_heap_lock == locked && _heap_lock_owner == thr) ||
         (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()),
         "must own the heap lock, or be the VM thread at a safepoint");
2595 }
2596 
2597 #endif