/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"
SCMUpdateRefsClosure::SCMUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}

#ifdef ASSERT
template <class T>
void AssertToSpaceClosure::do_oop_nv(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
           "need to-space object here obj: "PTR_FORMAT", rb(obj): "PTR_FORMAT", p: "PTR_FORMAT,
           p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
  }
}

void AssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void AssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
#endif
  67 
  68 const char* ShenandoahHeap::name() const {
  69   return "Shenandoah";
  70 }
  71 
  72 void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  73   HeapWord* cur = NULL;
  74   for (cur = start; cur < end; cur++) {
  75     tty->print_cr(PTR_FORMAT" : "PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  76   }
  77 }
  78 
  79 class PrintHeapRegionsClosure : public
  80    ShenandoahHeapRegionClosure {
  81 private:
  82   outputStream* _st;
  83 public:
  84   PrintHeapRegionsClosure() : _st(tty) {}
  85   PrintHeapRegionsClosure(outputStream* st) : _st(st) {}
  86 
  87   bool doHeapRegion(ShenandoahHeapRegion* r) {
  88     r->print_on(_st);
  89     return false;
  90   }
  91 };

class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;
  const size_t _bitmap_size;
  const size_t _page_size;
  char* _bitmap0_base;
  char* _bitmap1_base;
public:
  ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
                         char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _regions(regions),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap0_base(bitmap0_base),
    _bitmap1_base(bitmap1_base) {
    _regions->clear_current_index();
  }

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(r->bottom()), p2i(r->end()));
      os::pretouch_memory(r->bottom(), r->end(), _page_size);

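      // Each region's slice of the bitmap is found by scaling the region's
      // byte offsets down by heap_map_factor(), the heap-bytes-per-bitmap-byte ratio.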
      size_t start = r->region_number()       * ShenandoahHeapRegion::RegionSizeBytes / CMBitMap::heap_map_factor();
      size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::RegionSizeBytes / CMBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
      os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
      os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);

      r = _regions->claim_next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 Arguments::conservative_max_heap_alignment());
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);

  _num_regions = init_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _max_regions = max_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _initialSize = _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
  size_t regionSizeWords = ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahCollectionSet(_max_regions);
  _free_regions = new ShenandoahFreeSet(_max_regions);

  // Initialize fast collection set test structure.
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
                   NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length, mtGC);
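  // Bias the base pointer by the heap start so that the array can be indexed
  // directly with (address >> RegionSizeShift); the top-at-mark-start arrays
  // below use the same trick.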
  _in_cset_fast_test = _in_cset_fast_test_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  _next_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  _complete_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  size_t i = 0;
  for (i = 0; i < _num_regions; i++) {
    _in_cset_fast_test_base[i] = false; // Not in cset
    HeapWord* bottom = (HeapWord*) pgc_rs.base() + regionSizeWords * i;
    _complete_top_at_mark_starts_base[i] = bottom;
    _next_top_at_mark_starts_base[i] = bottom;
  }

  {
    ShenandoahHeapLock lock(this);
    for (i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* current = new ShenandoahHeapRegion(this, (HeapWord*) pgc_rs.base() +
                                                               regionSizeWords * i, regionSizeWords, i);
      _free_regions->add_region(current);
      _ordered_regions->add_region(current);
    }
  }
  assert(((size_t) _ordered_regions->active_regions()) == _num_regions, "");
  _first_region = _ordered_regions->get(0);
  _first_region_bottom = _first_region->bottom();
  assert((((size_t) _first_region_bottom) &
          (ShenandoahHeapRegion::RegionSizeBytes - 1)) == 0,
         "misaligned heap: "PTR_FORMAT, p2i(_first_region_bottom));

  _numAllocs = 0;

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    log_trace(gc, region)("All Regions");
    _ordered_regions->print(out);
    log_trace(gc, region)("Free Regions");
    _free_regions->print(out);
  }

  // The call below uses SATB machinery that currently lives in G1, but
  // probably belongs in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /*G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);
  // Reserve space for the complete and next mark bitmaps.
  size_t bitmap_size = CMBitMap::compute_size(heap_rs.size());
  MemRegion heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap0(bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  ReservedSpace bitmap1(bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _ordered_regions->count(), page_size);
    ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  _mark_bit_map0.initialize(heap_region, bitmap_region0);
  _complete_mark_bit_map = &_mark_bit_map0;

  _mark_bit_map1.initialize(heap_region, bitmap_region1);
  _next_mark_bit_map = &_mark_bit_map1;

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  return JNI_OK;
}

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _max_allocated_gc(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _cancelled_concgc(false),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _heap_lock(0),
#ifdef ASSERT
  _heap_lock_owner(NULL),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
{
  log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }
}

class ResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->next_top_at_mark_start(region->bottom());
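      // Marks can only exist below top-at-mark-start, so clearing that
      // range is sufficient.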
      if (top > bottom) {
        heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
  ResetNextBitmapTask task(_ordered_regions);
  workers->run_task(&task);
}

class ResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
  ResetCompleteBitmapTask task(_ordered_regions);
  workers->run_task(&task);
}

bool ShenandoahHeap::is_next_bitmap_clear() {
  HeapWord* start = _ordered_regions->bottom();
  HeapWord* end = _ordered_regions->end();
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity() / K, used() / K);
  st->print(" [" PTR_FORMAT ", " PTR_FORMAT ") ",
            p2i(reserved_region().start()),
            p2i(reserved_region().end()));
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::RegionSizeBytes / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (cancelled_concgc()) {
    st->print("cancelled ");
  }
  st->print("\n");

  // Adapted from VirtualSpace::print_on(), which is non-PRODUCT only
  st->print("Virtual space:");
  if (_storage.special()) st->print(" (pinned in memory)");
  st->cr();
  st->print_cr(" - committed: " SIZE_FORMAT, _storage.committed_size());
  st->print_cr(" - reserved:  " SIZE_FORMAT, _storage.reserved_size());
  st->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", p2i(_storage.low()), p2i(_storage.high()));
  st->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", p2i(_storage.low_boundary()), p2i(_storage.high_boundary()));

  if (Verbose) {
    print_heap_regions(st);
  }
}

class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    // This is a very tricky point in VM lifetime. We cannot easily call Threads::threads_do
    // here, because some system threads (VMThread, WatcherThread, etc) are not yet available.
    // Their initialization should be handled separately. If we miss some threads here,
    // then any other TLAB-related activity would fail with asserts.

    InitGCLABClosure init_gclabs;
    {
      MutexLocker ml(Threads_lock);
      for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
        init_gclabs.do_thread(thread);
      }
    }
    gc_threads_do(&init_gclabs);

    // The gclab cannot be initialized this early during VM startup, because it
    // cannot determine its max_size. Instead, we let the WorkGang initialize the
    // gclab when a new worker is created.
    _workers->set_initialize_gclab();
  }

  _scm->initialize(_max_workers);

  ref_processing_init();
}

class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
  size_t sum;
public:

  CalculateUsedRegionClosure() {
    sum = 0;
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    sum = sum + r->used();
    return false;
  }

  size_t getResult() { return sum; }
};

size_t ShenandoahHeap::calculateUsed() {
  CalculateUsedRegionClosure cl;
  heap_region_iterate(&cl);
  return cl.getResult();
}

void ShenandoahHeap::verify_heap_size_consistency() {
  assert(calculateUsed() == used(),
         "heap used size must be consistent: heap-used: "SIZE_FORMAT" regions-used: "SIZE_FORMAT, used(), calculateUsed());
}

size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used += bytes;
}

void ShenandoahHeap::set_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  _used = bytes;
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert_heaplock_or_safepoint();
  assert(_used >= bytes, "never decrease heap size by more than we've left");
  _used -= bytes;
}

size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

size_t ShenandoahHeap::min_capacity() const {
  return _initialSize;
}

VirtualSpace* ShenandoahHeap::storage() const {
  return (VirtualSpace*) &_storage;
}

bool ShenandoahHeap::is_in(const void* p) const {
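  // Regions are laid out contiguously from the first region's bottom, so a
  // simple bounds check over the committed regions suffices.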
  HeapWord* first_region_bottom = _first_region->bottom();
  HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * _num_regions;
  return p >= _first_region_bottom && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

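// GCLAB allocation shares the TLAB allocation path below; the boolean flag
// merely records whether the request comes from an evacuating GC thread.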
HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
  return allocate_new_tlab(word_size, false);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
  return allocate_new_tlab(word_size, true);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory(word_size, evacuating);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in dirty region");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size "SIZE_FORMAT" at addr "PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
  ShenandoahHeapLock heap_lock(this);

  HeapWord* result = allocate_memory_under_lock(word_size);
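  // Determine how many regions the request spans, then keep growing the heap
  // by that many regions until the allocation succeeds or the reserve of
  // uncommitted regions is exhausted.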
  size_t grow_by = (word_size * HeapWordSize + ShenandoahHeapRegion::RegionSizeBytes - 1) / ShenandoahHeapRegion::RegionSizeBytes;

  while (result == NULL && _num_regions + grow_by <= _max_regions) {
    grow_heap_by(grow_by);
    result = allocate_memory_under_lock(word_size);
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory_work(word_size);

  if (!evacuating) {
    // Allocation failed, try full-GC, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after full-GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try full-GC again.
    //
    // Lacking a way to detect progress from the "collect" call, we are left with blindly
    // retrying for some bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
    int tries = 0;
    while ((result == NULL) && (tries++ < ShenandoahFullGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing full GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
      collect(GCCause::_allocation_failure);
      result = allocate_memory_work(word_size);
    }
  }

  // Only update monitoring counters when not calling from a write-barrier.
  // Otherwise we might attempt to grab the Service_lock, which we must
  // not do when coming from a write-barrier (because the thread might
  // already hold the Compile_lock).
  if (! evacuating) {
    monitoring_support()->update_counters();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size "SIZE_FORMAT" at addr "PTR_FORMAT " by thread %d ",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}

bool ShenandoahHeap::call_from_write_barrier(bool evacuating) {
  return evacuating && Thread::current()->is_Java_thread();
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size) {
  assert_heaplock_owned_by_current_thread();

  if (word_size * HeapWordSize > ShenandoahHeapRegion::RegionSizeBytes) {
    return allocate_large_memory(word_size);
  }

  // Not enough memory in free region set.
  // Coming out of full GC, it is possible that there is no
  // free region available, so current_index may not be valid.
  if (word_size * HeapWordSize > _free_regions->capacity()) return NULL;

  ShenandoahHeapRegion* my_current_region = _free_regions->current_no_humongous();

  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }
  assert(my_current_region != NULL, "should have a region at this point");

#ifdef ASSERT
  if (in_collection_set(my_current_region)) {
    print_heap_regions();
  }
#endif
  assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result = my_current_region->allocate(word_size);

  while (result == NULL) {
    // 2nd attempt. Try next region.
    _free_regions->increase_used(my_current_region->free());
    ShenandoahHeapRegion* next_region = _free_regions->next_no_humongous();
    assert(next_region != my_current_region, "must not get current again");
    my_current_region = next_region;

    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    assert(my_current_region != NULL, "should have a region at this point");
    assert(! in_collection_set(my_current_region), "never get targeted regions in free-lists");
    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->allocate(word_size);
  }

  my_current_region->increase_live_data_words(word_size);
  increase_used(word_size * HeapWordSize);
  _free_regions->increase_used(word_size * HeapWordSize);
  return result;
}

HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
  assert_heaplock_owned_by_current_thread();

  uint required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
  if (required_regions > _max_regions) return NULL;

  ShenandoahHeapRegion* r = _free_regions->allocate_contiguous(required_regions);

  HeapWord* result = NULL;

  if (r != NULL) {
    result = r->bottom();

    log_debug(gc, humongous)("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" in start region "SIZE_FORMAT,
                             (words * HeapWordSize) / K, p2i(result), r->region_number());
  } else {
    log_debug(gc, humongous)("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" failed",
                             (words * HeapWordSize) / K, p2i(result));
  }

  return result;
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
#ifdef ASSERT
  if (ShenandoahVerify && _numAllocs > 1000000) {
    _numAllocs = 0;
  }
  _numAllocs++;
#endif
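  // Each object is preceded by a Brooks forwarding pointer word: allocate room
  // for it, and hand out the address just past it.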
  HeapWord* filler = allocate_memory(BrooksPointer::word_size() + size, false);
  if (filler != NULL) {
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(! in_collection_set(result), "never allocate in targeted region");
    return result;
  } else {
    /*
    tty->print_cr("Out of memory. Requested number of words: "SIZE_FORMAT" used heap: "INT64_FORMAT", bytes allocated since last CM: "INT64_FORMAT,
                  size, used(), _bytes_allocated_since_cm);
    {
      print_heap_regions();
      tty->print("Printing "SIZE_FORMAT" free regions:\n", _free_regions->count());
      _free_regions->print();
    }
    */
    return NULL;
  }
}

class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {
  }

  void do_object(oop p) {
    log_develop_trace(gc, compaction)("Calling ParallelEvacuateRegionObjectClosure on "PTR_FORMAT" of size %d", p2i((HeapWord*) p), p->size());

    assert(_heap->is_marked_complete(p), "expect only marked objects");
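    // Evacuate only objects that still resolve to themselves; forwarded
    // objects already have a to-space copy.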
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

#ifdef ASSERT
class VerifyEvacuatedObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_complete(p)) {
      oop p_prime = oopDesc::bs()->read_barrier(p);
      assert(! oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
      assert(p->klass() == p_prime->klass(), "Should have the same class p: "PTR_FORMAT", p_prime: "PTR_FORMAT, p2i(p), p2i(p_prime));
      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "One forward once");
    }
  }
};

void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
  VerifyEvacuatedObjectClosure verify_evacuation;
  marked_object_iterate(from_region, &verify_evacuation);
}
#endif

void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
  assert(from_region->has_live(), "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

  marked_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  if (ShenandoahVerify && ! cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
#endif
}

class ParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* _sh;
  ShenandoahCollectionSet* _cs;

public:
  ParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* from_hr = _cs->claim_next();

    while (from_hr != NULL) {
      log_develop_trace(gc, region)("Thread "UINT32_FORMAT" claimed Heap Region "SIZE_FORMAT,
                                    worker_id,
                                    from_hr->region_number());

      assert(from_hr->has_live(), "all-garbage regions are reclaimed early");
      _sh->parallel_evacuate_region(from_hr);

      if (_sh->cancelled_concgc()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, from_hr->region_number());
        break;
      }
      from_hr = _cs->claim_next();
    }
  }
};

class RecycleDirtyRegionsClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
  size_t _bytes_reclaimed;
public:
  RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()), _bytes_reclaimed(0) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert (! _heap->cancelled_concgc(), "no recycling after cancelled marking");

    if (_heap->in_collection_set(r)) {
      log_develop_trace(gc, region)("Recycling region " SIZE_FORMAT ":", r->region_number());
      _heap->decrease_used(r->used());
      _bytes_reclaimed += r->used();
      r->recycle();
    }

    return false;
  }
  size_t bytes_reclaimed() { return _bytes_reclaimed; }
  void clear_bytes_reclaimed() { _bytes_reclaimed = 0; }
};

void ShenandoahHeap::recycle_dirty_regions() {
  RecycleDirtyRegionsClosure cl;
  cl.clear_bytes_reclaimed();

  heap_region_iterate(&cl);

  _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
  if (! cancelled_concgc()) {
    clear_cset_fast_test();
  }
}

ShenandoahFreeSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}

void ShenandoahHeap::print_heap_regions(outputStream* st) const {
  _ordered_regions->print(st);
}

class PrintAllRefsOopClosure: public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT")",
                      _prefix, _index,
                      p2i(p), p2i(o),
                      BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(o)),
                      o->klass()->internal_name(), p2i(o->klass()));
      } else {
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") dirty -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty)",
                      _prefix, _index,
                      p2i(p), p2i(o));
      }
    } else {
      tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class PrintAllRefsObjectClosure : public ObjectClosure {
  const char* _prefix;

public:
  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p)) {
      tty->print_cr("%s object "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT") refers to:",
                    _prefix, p2i(p),
                    BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(p)),
                    p->klass()->internal_name(), p2i(p->klass()));
      PrintAllRefsOopClosure cl(_prefix);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::print_all_refs(const char* prefix) {
  tty->print_cr("printing all references in the heap");
  tty->print_cr("root references:");

  ensure_parsability(false);

  PrintAllRefsOopClosure cl(prefix);
  roots_iterate(&cl);

  tty->print_cr("heap references:");
  PrintAllRefsObjectClosure cl2(prefix);
  object_iterate(&cl2);
}

class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (! _heap->is_marked_complete(o)) {
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: "PTR_FORMAT": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_complete(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr "PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
        tty->print_cr("oop has forwardee: p: "PTR_FORMAT" (%s), o = "PTR_FORMAT" (%s), new-o: "PTR_FORMAT" (%s)",
                      p2i(p),
                      BOOL_TO_STR(_heap->in_collection_set(p)),
                      p2i(o),
                      BOOL_TO_STR(_heap->in_collection_set(o)),
                      p2i((HeapWord*) oopDesc::bs()->read_barrier(o)),
                      BOOL_TO_STR(_heap->in_collection_set(oopDesc::bs()->read_barrier(o))));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
      assert(! _heap->in_collection_set(o), "references must not point to dirty heap regions");
      assert(_heap->is_marked_complete(o), "live oops must be marked current");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::verify_heap_after_marking() {
  verify_heap_size_consistency();

  log_trace(gc)("verifying heap after marking");

  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);
  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}

void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

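  // The humongous object itself starts one Brooks pointer word past the
  // region bottom.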
  oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  uint required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  uint index = r->region_number();

  assert(!r->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (log_is_enabled(Debug, gc, humongous)) {
      log_debug(gc, humongous)("reclaiming "UINT32_FORMAT" humongous regions for object of size: "SIZE_FORMAT" words", required_regions, size);
      ResourceMark rm;
      outputStream* out = Log(gc, humongous)::debug_stream();
      region->print_on(out);
    }

    region->recycle();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::RegionSizeBytes);
  }
}

class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! heap->is_marked_complete(humongous_obj)) {
        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

#ifdef ASSERT
class CheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");

  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {
    recycle_dirty_regions();

    ensure_parsability(true);

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }
#endif

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.

    {
      ShenandoahHeapLock lock(this);
      _collection_set->clear();
      _free_regions->clear();

      ShenandoahReclaimHumongousRegionsClosure reclaim;
      heap_region_iterate(&reclaim);

#ifdef ASSERT
      CheckCollectionSetClosure ccsc;
      _ordered_regions->heap_region_iterate(&ccsc);
#endif

      if (UseShenandoahMatrix) {
        size_t num = num_regions();
        int* connections = NEW_C_HEAP_ARRAY(int, num * num, mtGC);
        calculate_matrix(connections);
        print_matrix(connections);
        _shenandoah_policy->choose_collection_set(_collection_set, connections);
        FREE_C_HEAP_ARRAY(int, connections);
      } else {
        _shenandoah_policy->choose_collection_set(_collection_set);
      }

      _shenandoah_policy->choose_free_set(_free_regions);
    }

    if (UseShenandoahMatrix) {
      _collection_set->print();
    }

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}

class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
    RetireTLABClosure cl(retire_tlabs);
    Threads::threads_do(&cl);
  }
}

class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
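        // Evacuate the object only if it has no to-space copy yet (i.e. it
        // still resolves to itself), then update the root with the resolved
        // to-space address.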
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
#ifdef ASSERT
    else {
      // tty->print_cr("not updating root at: "PTR_FORMAT" with object: "PTR_FORMAT", is_in_heap: %s, is_in_cset: %s, is_marked: %s",
      //               p2i(p),
      //               p2i((HeapWord*) obj),
      //               BOOL_TO_STR(_heap->is_in(obj)),
      //               BOOL_TO_STR(_heap->in_cset_fast_test(obj)),
      //               BOOL_TO_STR(_heap->is_marked_complete(obj)));
    }
#endif
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:

  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

class ShenandoahFixRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:

  ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    SCMUpdateRefsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
  COMPILER2_PRESENT(DerivedPointerTable::clear());

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(false);
  }
#endif

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  if (cancelled_concgc()) {
    // If initial evacuation has been cancelled, we need to update all references
    // after all workers have finished. Otherwise we might run into the following problem:
    // GC thread 1 cannot allocate anymore, thus evacuation fails and leaves a from-space
    // ptr of object X in a root; GC thread 2 then evacuates the same object X to to-space,
    // which leaves a truly dangling from-space reference in the first root oop*. This must not happen.
    // clear() and update_pointers() must always be called in pairs,
    // cannot nest with above clear()/update_pointers().
    COMPILER2_PRESENT(DerivedPointerTable::clear());
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahFixRootsTask update_roots_task(&rp);
    workers()->run_task(&update_roots_task);
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  }

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(true);
  }
#endif

#ifdef ASSERT
  {
    AssertToSpaceClosure cl;
    CodeBlobToOopClosure code_cl(&cl, !CodeBlobToOopClosure::FixRelocations);
    ShenandoahRootEvacuator rp(this, 1);
    rp.process_evacuate_roots(&cl, &code_cl, 0);
  }
#endif
}

void ShenandoahHeap::do_evacuation() {
  parallel_evacuate();

  if (ShenandoahVerify && ! cancelled_concgc()) {
    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
    if (Thread::current()->is_VM_thread()) {
      verify_after_evacuation.doit();
    } else {
      VMThread::execute(&verify_after_evacuation);
    }
  }
}

void ShenandoahHeap::parallel_evacuate() {
  log_develop_trace(gc)("starting parallel_evacuate");

  _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream *out = Log(gc, region)::trace_stream();
    out->print_cr("Printing all available regions:");
    print_heap_regions(out);
  }

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream *out = Log(gc, cset)::trace_stream();
    out->print("Printing collection set which contains "SIZE_FORMAT" regions:\n", _collection_set->count());
    _collection_set->print(out);

    out->print("Printing free set which contains "SIZE_FORMAT" regions:\n", _free_regions->count());
    _free_regions->print(out);
  }

  ParallelEvacuationTask evacuationTask(this, _collection_set);

  workers()->run_task(&evacuationTask);

  if (log_is_enabled(Trace, gc, cset)) {
    ResourceMark rm;
    outputStream *out = Log(gc, cset)::trace_stream();
    out->print("Printing postgc collection set which contains "SIZE_FORMAT" regions:\n",
               _collection_set->count());
    _collection_set->print(out);

    out->print("Printing postgc free regions which contain "SIZE_FORMAT" free regions:\n",
               _free_regions->count());
    _free_regions->print(out);
  }

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream *out = Log(gc, region)::trace_stream();
    out->print_cr("all regions after evacuation:");
    print_heap_regions(out);
  }

  _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);
}

class VerifyEvacuationClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  ShenandoahHeapRegion* _from_region;

public:
  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop heap_oop = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(heap_oop)) {
      guarantee(! _from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: "PTR_FORMAT, p2i((HeapWord*) heap_oop));
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl);

  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahRootProcessor rp(this, 1);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
}

void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
  VerifyEvacuationClosure rootsCl(from_region);
  roots_iterate(&rootsCl);
}
1460 bool ShenandoahHeap::supports_tlab_allocation() const {
1461   return true;
1462 }
1463 
1464 
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1466   size_t idx = _free_regions->current_index();
1467   ShenandoahHeapRegion* current = _free_regions->get(idx);
1468   if (current == NULL) {
1469     return 0;
1470   } else if (current->free() > MinTLABSize) {
1471     // Current region has enough space left, can use it.
1472     return current->free();
1473   } else {
1474     // No more space in current region, we will take next free region
1475     // on the next TLAB allocation.
1476     return ShenandoahHeapRegion::RegionSizeBytes;
1477   }
1478 }
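
// Illustrative only: a TLAB refill path might consume the value above roughly
// like this (hypothetical flow; the names and call shape are approximations,
// not code from this file):
//
//   size_t limit = heap->unsafe_max_tlab_alloc(thread);        // racy upper bound
//   size_t size  = MIN2(thread->tlab().desired_size(), limit);
//   HeapWord* buf = allocate_new_tlab(size);
//
// The "unsafe" in the name reflects that the answer is only a snapshot:
// another thread may claim the current free region before this allocation
// actually happens.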
1479 
1480 size_t ShenandoahHeap::max_tlab_size() const {
1481   return ShenandoahHeapRegion::RegionSizeBytes;
1482 }
1483 
1484 class ResizeGCLABClosure : public ThreadClosure {
1485 public:
1486   void do_thread(Thread* thread) {
1487     thread->gclab().resize();
1488   }
1489 };
1490 
1491 void ShenandoahHeap::resize_all_tlabs() {
1492   CollectedHeap::resize_all_tlabs();
1493 
1494   ResizeGCLABClosure cl;
1495   Threads::threads_do(&cl);
1496 }
1497 
1498 class AccumulateStatisticsGCLABClosure : public ThreadClosure {
1499 public:
1500   void do_thread(Thread* thread) {
1501     thread->gclab().accumulate_statistics();
1502     thread->gclab().initialize_statistics();
1503   }
1504 };
1505 
1506 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1507   AccumulateStatisticsGCLABClosure cl;
1508   Threads::threads_do(&cl);
1509 }
1510 
bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
1512   return true;
1513 }
1514 
1515 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1516   // Overridden to do nothing.
1517   return new_obj;
1518 }
1519 
bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1521   return true;
1522 }
1523 
1524 bool ShenandoahHeap::card_mark_must_follow_store() const {
1525   return false;
1526 }
1527 
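// External entry point for requested collections: explicit (user-requested)
// GCs become full GCs on the concurrent GC thread unless
// -XX:+DisableExplicitGC is set, and allocation failures force a full GC that
// also clears soft references.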
1528 void ShenandoahHeap::collect(GCCause::Cause cause) {
1529   assert(cause != GCCause::_gc_locker, "no JNI critical callback");
1530   if (GCCause::is_user_requested_gc(cause)) {
1531     if (! DisableExplicitGC) {
1532       _concurrent_gc_thread->do_full_gc(cause);
1533     }
1534   } else if (cause == GCCause::_allocation_failure) {
1535     collector_policy()->set_should_clear_all_soft_refs(true);
1536     _concurrent_gc_thread->do_full_gc(cause);
1537   }
1538 }
1539 
void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  // Deliberately a no-op: full GCs are requested through collect() above and
  // carried out by the concurrent GC thread, so there is nothing to do here.
}
1543 
AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;
}
1549 
1550 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1551   return _shenandoah_policy;
1552 }
1553 
1554 
1555 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1556   Space* sp = heap_region_containing(addr);
1557   if (sp != NULL) {
1558     return sp->block_start(addr);
1559   }
1560   return NULL;
1561 }
1562 
1563 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1564   Space* sp = heap_region_containing(addr);
1565   assert(sp != NULL, "block_size of address outside of heap");
1566   return sp->block_size(addr);
1567 }
1568 
1569 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1570   Space* sp = heap_region_containing(addr);
1571   return sp->block_is_obj(addr);
1572 }
1573 
jlong ShenandoahHeap::millis_since_last_gc() {
  // Not tracked yet; report 0 for now.
  return 0;
}
1577 
1578 void ShenandoahHeap::prepare_for_verify() {
1579   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1580     ensure_parsability(false);
1581   }
1582 }
1583 
1584 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1585   workers()->print_worker_threads_on(st);
1586 }
1587 
1588 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1589   workers()->threads_do(tcl);
1590 }
1591 
1592 void ShenandoahHeap::print_tracing_info() const {
1593   if (log_is_enabled(Info, gc, stats)) {
1594     ResourceMark rm;
1595     outputStream* out = Log(gc, stats)::info_stream();
1596     _shenandoah_policy->print_tracing_info(out);
1597   }
1598 }
1599 
1600 class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
1601 private:
1602   ShenandoahHeap*  _heap;
1603   VerifyOption     _vo;
1604   bool             _failures;
1605 public:
1606   // _vo == UsePrevMarking -> use "prev" marking information,
1607   // _vo == UseNextMarking -> use "next" marking information,
1608   // _vo == UseMarkWord    -> use mark word from object header.
1609   ShenandoahVerifyRootsClosure(VerifyOption vo) :
1610     _heap(ShenandoahHeap::heap()),
1611     _vo(vo),
1612     _failures(false) { }
1613 
1614   bool failures() { return _failures; }
1615 
1616 private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
      _failures = true;
      // Just for debugging.
      tty->print_cr("Root location "PTR_FORMAT" holds invalid oop "PTR_FORMAT,
                    p2i(p), p2i((void*) obj));
      //      obj->print_on(tty);
    }
    guarantee(obj->is_oop_or_null(), "is oop or null");
  }
1629 
1630 public:
1631   void do_oop(oop* p)       {
1632     do_oop_work(p);
1633   }
1634 
1635   void do_oop(narrowOop* p) {
1636     do_oop_work(p);
1637   }
1638 
1639 };
1640 
1641 class ShenandoahVerifyHeapClosure: public ObjectClosure {
1642 private:
1643   ShenandoahVerifyRootsClosure _rootsCl;
1644 public:
  ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
    _rootsCl(rc) {}
1647 
1648   void do_object(oop p) {
1649     _rootsCl.do_oop(&p);
1650   }
1651 };
1652 
1653 class ShenandoahVerifyKlassClosure: public KlassClosure {
1654   OopClosure *_oop_closure;
1655  public:
1656   ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {}
1657   void do_klass(Klass* k) {
1658     k->oops_do(_oop_closure);
1659   }
1660 };
1661 
1662 void ShenandoahHeap::verify(VerifyOption vo) {
1663   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1664 
1665     ShenandoahVerifyRootsClosure rootsCl(vo);
1666 
1667     assert(Thread::current()->is_VM_thread(),
1668            "Expected to be executed serially by the VM thread at this point");
1669 
1670     roots_iterate(&rootsCl);
1671 
1672     bool failures = rootsCl.failures();
1673     log_trace(gc)("verify failures: %s", BOOL_TO_STR(failures));
1674 
1675     ShenandoahVerifyHeapClosure heapCl(rootsCl);
1676 
1677     object_iterate(&heapCl);
1678     // TODO: Implement rest of it.
1679   } else {
1680     tty->print("(SKIPPING roots, heapRegions, remset) ");
1681   }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1684   return _free_regions->capacity();
1685 }
1686 
1687 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
1688   ObjectClosure* _cl;
1689 public:
1690   ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1691   bool doHeapRegion(ShenandoahHeapRegion* r) {
1692     ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
1693     return false;
1694   }
1695 };
1696 
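// Walks all objects via the marking bitmap (marked_object_iterate), region by
// region. Humongous continuation regions are skipped at the region level;
// presumably the humongous object itself is fully covered from its start
// region.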
1697 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1698   ShenandoahIterateObjectClosureRegionClosure blk(cl);
  heap_region_iterate(&blk,
                      /* skip_dirty_regions = */ false,
                      /* skip_humongous_continuation = */ true);
1700 }
1701 
1702 class ShenandoahSafeObjectIterateAdjustPtrsClosure : public MetadataAwareOopClosure {
1703 private:
1704   ShenandoahHeap* _heap;
1705 
1706 public:
1707   ShenandoahSafeObjectIterateAdjustPtrsClosure() : _heap(ShenandoahHeap::heap()) {}
1708 
1709 private:
1710   template <class T>
1711   inline void do_oop_work(T* p) {
1712     T o = oopDesc::load_heap_oop(p);
1713     if (!oopDesc::is_null(o)) {
1714       oop obj = oopDesc::decode_heap_oop_not_null(o);
1715       oopDesc::encode_store_heap_oop(p, BrooksPointer::forwardee(obj));
1716     }
1717   }
1718 public:
1719   void do_oop(oop* p) {
1720     do_oop_work(p);
1721   }
1722   void do_oop(narrowOop* p) {
1723     do_oop_work(p);
1724   }
1725 };
1726 
1727 class ShenandoahSafeObjectIterateAndUpdate : public ObjectClosure {
1728 private:
1729   ObjectClosure* _cl;
1730 public:
1731   ShenandoahSafeObjectIterateAndUpdate(ObjectClosure *cl) : _cl(cl) {}
1732 
1733   virtual void do_object(oop obj) {
1734     assert (oopDesc::unsafe_equals(obj, BrooksPointer::forwardee(obj)),
1735             "avoid double-counting: only non-forwarded objects here");
1736 
1737     // Fix up the ptrs.
1738     ShenandoahSafeObjectIterateAdjustPtrsClosure adjust_ptrs;
1739     obj->oop_iterate(&adjust_ptrs);
1740 
    // Now the object can be handed to the wrapped closure:
1742     _cl->do_object(obj);
1743   }
1744 };
1745 
1746 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1747   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1748 
1749   // Safe iteration does objects only with correct references.
1750   // This is why we skip dirty regions that have stale copies of objects,
1751   // and fix up the pointers in the returned objects.
1752 
1753   ShenandoahSafeObjectIterateAndUpdate safe_cl(cl);
1754   ShenandoahIterateObjectClosureRegionClosure blk(&safe_cl);
  heap_region_iterate(&blk,
                      /* skip_dirty_regions = */ true,
                      /* skip_humongous_continuation = */ true);
1758 
1759   _need_update_refs = false; // already updated the references
1760 }
1761 
1762 // Apply blk->doHeapRegion() on all committed regions in address order,
1763 // terminating the iteration early if doHeapRegion() returns true.
1764 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
1765   for (size_t i = 0; i < _num_regions; i++) {
1766     ShenandoahHeapRegion* current  = _ordered_regions->get(i);
1767     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1768       continue;
1769     }
1770     if (skip_dirty_regions && in_collection_set(current)) {
1771       continue;
1772     }
1773     if (blk->doHeapRegion(current)) {
1774       return;
1775     }
1776   }
1777 }
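
// Example (illustrative only, kept as a comment): a one-off closure that
// counts pinned regions. Returning true from doHeapRegion() stops the
// iteration early; returning false visits every region.
//
//   class CountPinnedClosure : public ShenandoahHeapRegionClosure {
//   public:
//     size_t _pinned;
//     CountPinnedClosure() : _pinned(0) {}
//     bool doHeapRegion(ShenandoahHeapRegion* r) {
//       if (r->is_pinned()) _pinned++;
//       return false; // keep iterating
//     }
//   };
//
//   CountPinnedClosure cl;
//   ShenandoahHeap::heap()->heap_region_iterate(&cl);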
1778 
1779 class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
1780   ShenandoahHeap* sh;
1781 public:
1782   ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
1783 
1784   bool doHeapRegion(ShenandoahHeapRegion* r) {
1785     r->clear_live_data();
1786     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1787     return false;
1788   }
1789 };
1790 
1791 
1792 void ShenandoahHeap::start_concurrent_marking() {
1793 
1794   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
1795   accumulate_statistics_all_tlabs();
1796   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);
1797 
1798   set_concurrent_mark_in_progress(true);
1799   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1800   if (UseTLAB) {
1801     shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
1802     ensure_parsability(true);
1803     shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
1804   }
1805 
1806   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1807   _used_start_gc = used();
1808 
1809 #ifdef ASSERT
1810   if (ShenandoahDumpHeapBeforeConcurrentMark) {
1811     ensure_parsability(false);
1812     print_all_refs("pre-mark");
1813   }
1814 #endif
1815 
1816   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
1817   ClearLivenessClosure clc(this);
1818   heap_region_iterate(&clc);
1819   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);
1820 
1825   // Make above changes visible to worker threads
1826   OrderAccess::fence();
1827 
1828   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
1829   concurrentMark()->init_mark_roots();
1830   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);
1833 }
1834 
1835 class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
1836 
1837   ShenandoahHeap* _sh;
1838 
1839 public:
1840   VerifyAfterEvacuationClosure() : _sh ( ShenandoahHeap::heap() ) {}
1841 
1842   template<class T> void do_oop_nv(T* p) {
1843     T heap_oop = oopDesc::load_heap_oop(p);
1844     if (!oopDesc::is_null(heap_oop)) {
1845       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1846       guarantee(_sh->in_collection_set(obj) == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                "forwarded objects can only exist in dirty (from-space) regions; is_dirty: %s, is_forwarded: %s, obj-klass: %s, marked: %s",
1848                 BOOL_TO_STR(_sh->in_collection_set(obj)),
1849                 BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1850                 obj->klass()->external_name(),
1851                 BOOL_TO_STR(_sh->is_marked_complete(obj))
1852                 );
1853       obj = oopDesc::bs()->read_barrier(obj);
1854       guarantee(! _sh->in_collection_set(obj), "forwarded oops must not point to dirty regions");
1855       guarantee(obj->is_oop(), "is_oop");
1856       guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
1857     }
1858   }
1859 
1860   void do_oop(oop* p)       { do_oop_nv(p); }
1861   void do_oop(narrowOop* p) { do_oop_nv(p); }
1862 
1863 };
1864 
1865 void ShenandoahHeap::verify_heap_after_evacuation() {
1866 
1867   verify_heap_size_consistency();
1868 
1869   ensure_parsability(false);
1870 
1871   VerifyAfterEvacuationClosure cl;
1872   roots_iterate(&cl);
1873 
1874   ObjectToOopClosure objs(&cl);
1875   object_iterate(&objs);
1876 
1877 }
1878 
1879 class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
1880 public:
1881   bool doHeapRegion(ShenandoahHeapRegion* r) {
1882     assert(! ShenandoahHeap::heap()->in_collection_set(r), "no region must be in collection set");
1883     return false;
1884   }
1885 };
1886 
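// Marking is double-buffered: the "next" bitmap (and its top-at-mark-start
// data) is built by the in-flight marking cycle, while "complete" holds the
// result of the previous cycle. Finishing a cycle exchanges the two roles
// rather than copying any data.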
1887 void ShenandoahHeap::swap_mark_bitmaps() {
1888   // Swap bitmaps.
1889   CMBitMap* tmp1 = _complete_mark_bit_map;
1890   _complete_mark_bit_map = _next_mark_bit_map;
1891   _next_mark_bit_map = tmp1;
1892 
1893   // Swap top-at-mark-start pointers
1894   HeapWord** tmp2 = _complete_top_at_mark_starts;
1895   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1896   _next_top_at_mark_starts = tmp2;
1897 
1898   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1899   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1900   _next_top_at_mark_starts_base = tmp3;
1901 }
1902 
1903 void ShenandoahHeap::stop_concurrent_marking() {
1904   assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // Marking completed normally, so references are consistent: reset the
    // update-refs flag and flip the "next" marking data into the "complete"
    // role. If marking was cancelled, the flag stays set so that references
    // are still updated later.
    set_need_update_refs(false);
    swap_mark_bitmaps();
  }
1911   set_concurrent_mark_in_progress(false);
1912 
1913   if (log_is_enabled(Trace, gc, region)) {
1914     ResourceMark rm;
1915     outputStream* out = Log(gc, region)::trace_stream();
1916     print_heap_regions(out);
1917   }
1918 
1919 }
1920 
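// Toggling concurrent marking also flips the SATB (snapshot-at-the-beginning)
// queues on all Java threads, so mutators record overwritten references only
// while marking is actually running.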
1921 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1922   _concurrent_mark_in_progress = in_progress ? 1 : 0;
1923   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1924 }
1925 
void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
  // Note: it is important to set the global _evacuation_in_progress flag first
  // (before updating the per-thread flags), so that Java threads spinning in
  // oom_during_evacuation() can observe the change and reach a safepoint while
  // a VM task may be pending.
1930   set_evacuation_in_progress(in_progress);
1931   MutexLocker mu(Threads_lock);
1932   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
1933 }
1934 
1935 void ShenandoahHeap::set_evacuation_in_progress_at_safepoint(bool in_progress) {
1936   assert(SafepointSynchronize::is_at_safepoint(), "Only call this at safepoint");
1937   set_evacuation_in_progress(in_progress);
1938   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
1939 }
1940 
1941 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1942   _evacuation_in_progress = in_progress ? 1 : 0;
1943   OrderAccess::fence();
1944 }
1945 
void ShenandoahHeap::verify_copy(oop p, oop c) {
  assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
  assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
  if (p->klass() != c->klass()) {
    print_heap_regions();
  }
  assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
  assert(p->size() == c->size(), "verify size");
  // Object may have been locked between copy and verification:
  //    assert(p->mark() == c->mark(), "verify mark");
  assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
}
1958 
1959 void ShenandoahHeap::oom_during_evacuation() {
1960   log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
1961                         Thread::current()->osthread()->thread_id());
1962 
1963   // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
1964   collector_policy()->set_should_clear_all_soft_refs(true);
1965   concurrent_thread()->try_set_full_gc();
1966   cancel_concgc(_oom_evacuation);
1967 
1968   if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
1969     assert(! Threads_lock->owned_by_self()
1970            || SafepointSynchronize::is_at_safepoint(), "must not hold Threads_lock here");
1971     log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation finishes.");
1972     while (_evacuation_in_progress) { // wait.
1973       Thread::current()->_ParkEvent->park(1);
1974     }
1975   }
1976 
1977 }
1978 
1979 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1980   // Initialize Brooks pointer for the next object
1981   HeapWord* result = obj + BrooksPointer::word_size();
1982   BrooksPointer::initialize(oop(result));
1983   return result;
1984 }
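
// Resulting layout (illustrative; assumes BrooksPointer::word_size() == 1):
//
//   obj    -> [ brooks forwarding word ]   // set up by BrooksPointer::initialize,
//   result -> [ mark word | klass | ... ]  //   initially pointing at result itself
//
// i.e. every object carries its forwarding pointer in the word(s) directly in
// front of it, and callers receive the address of the object proper.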
1985 
1986 uint ShenandoahHeap::oop_extra_words() {
1987   return BrooksPointer::word_size();
1988 }
1989 
1990 void ShenandoahHeap::grow_heap_by(size_t num_regions) {
1991   size_t base = _num_regions;
1992   ensure_new_regions(num_regions);
1993   for (size_t i = 0; i < num_regions; i++) {
1994     size_t new_region_index = i + base;
1995     HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * new_region_index;
1996     ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion(this, start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, new_region_index);
1997 
1998     if (log_is_enabled(Trace, gc, region)) {
1999       ResourceMark rm;
2000       outputStream* out = Log(gc, region)::trace_stream();
2001       out->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
2002       new_region->print_on(out);
2003     }
2004 
2005     assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
2006     _ordered_regions->add_region(new_region);
2007     _in_cset_fast_test_base[new_region_index] = false; // Not in cset
2008     _next_top_at_mark_starts_base[new_region_index] = new_region->bottom();
2009     _complete_top_at_mark_starts_base[new_region_index] = new_region->bottom();
2010 
2011     _free_regions->add_region(new_region);
2012   }
2013 }
2014 
2015 void ShenandoahHeap::ensure_new_regions(size_t new_regions) {
2016 
2017   size_t num_regions = _num_regions;
2018   size_t new_num_regions = num_regions + new_regions;
2019   assert(new_num_regions <= _max_regions, "we checked this earlier");
2020 
2021   size_t expand_size = new_regions * ShenandoahHeapRegion::RegionSizeBytes;
2022   log_trace(gc, region)("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
2023   bool success = _storage.expand_by(expand_size, ShenandoahAlwaysPreTouch);
2024   assert(success, "should always be able to expand by requested size");
2025 
2026   _num_regions = new_num_regions;
2027 
2028 }
2029 
2030 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
2031   _heap(ShenandoahHeap::heap_no_check()) {
2032 }
2033 
2034 void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
2035   _heap = heap;
2036 }
2037 
2038 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
2039 
2040   assert(_heap != NULL, "sanity");
2041   obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
2042 #ifdef ASSERT
2043   if (_heap->concurrent_mark_in_progress()) {
2044     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
2045   }
2046 #endif
2047   assert(!oopDesc::is_null(obj), "null");
2048   return _heap->is_marked_next(obj);
2049 }
2050 
2051 void ShenandoahHeap::ref_processing_init() {
2052   MemRegion mr = reserved_region();
2053 
2054   isAlive.init(ShenandoahHeap::heap());
2055   assert(_max_workers > 0, "Sanity");
2056 
  _ref_processor =
    new ReferenceProcessor(mr,                     // span
                           ParallelRefProcEnabled, // MT processing?
                           _max_workers,           // degree of MT processing
                           true,                   // MT discovery?
                           _max_workers,           // degree of MT discovery
                           false,                  // reference discovery is not atomic
                           &isAlive);              // is-alive closure
2070 }
2071 
2072 #ifdef ASSERT
2073 void ShenandoahHeap::set_from_region_protection(bool protect) {
  for (size_t i = 0; i < _num_regions; i++) {
2075     ShenandoahHeapRegion* region = _ordered_regions->get(i);
2076     if (region != NULL && in_collection_set(region)) {
2077       if (protect) {
2078         region->memProtectionOn();
2079       } else {
2080         region->memProtectionOff();
2081       }
2082     }
2083   }
2084 }
2085 #endif
2086 
2087 size_t ShenandoahHeap::num_regions() {
2088   return _num_regions;
2089 }
2090 
2091 size_t ShenandoahHeap::max_regions() {
2092   return _max_regions;
2093 }
2094 
2095 GCTracer* ShenandoahHeap::tracer() {
2096   return shenandoahPolicy()->tracer();
2097 }
2098 
2099 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2100   return _free_regions->used();
2101 }
2102 
2103 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
2104   if (try_cancel_concgc()) {
2105     log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
2106     _shenandoah_policy->report_concgc_cancelled();
2107   }
2108 }
2109 
2110 void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
2111   if (try_cancel_concgc()) {
2112     log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
2113     _shenandoah_policy->report_concgc_cancelled();
2114   }
2115 }
2116 
2117 const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
2118   switch (cause) {
2119     case _oom_evacuation:
2120       return "Out of memory for evacuation";
2121     case _vm_stop:
2122       return "Stopping VM";
2123     default:
2124       return "Unknown";
2125   }
2126 }
2127 
2128 void ShenandoahHeap::clear_cancelled_concgc() {
2129   set_cancelled_concgc(false);
2130 }
2131 
2132 uint ShenandoahHeap::max_workers() {
2133   return _max_workers;
2134 }
2135 
2136 void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate even while a GC cycle is running.
2138 
2139   // Step 1. Notify control thread that we are in shutdown.
2140   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2141   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2142   _concurrent_gc_thread->prepare_for_graceful_shutdown();
2143 
2144   // Step 2. Notify GC workers that we are cancelling GC.
2145   cancel_concgc(_vm_stop);
2146 
  // Step 3. Wait until the GC control thread exits normally.
2148   _concurrent_gc_thread->stop();
2149 }
2150 
2151 void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) {
  StringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols);
  workers()->run_task(&shenandoah_unlink_task);

  // TODO: String deduplication (cf. G1StringDedup::unlink) is not hooked up here.
2159 }
2160 
2161 void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
2162   _need_update_refs = need_update_refs;
2163 }
2164 
// FIXME: This should live in ShenandoahHeapRegionSet.
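// Returns the next region a mark-compact cycle may compact into, skipping
// humongous regions: compaction presumably never moves humongous objects, so
// neither their start nor continuation regions are candidates.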
2166 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2167   size_t region_idx = r->region_number() + 1;
2168   ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
2169   guarantee(next->region_number() == region_idx, "region number must match");
2170   while (next->is_humongous()) {
2171     region_idx = next->region_number() + 1;
2172     next = _ordered_regions->get(region_idx);
2173     guarantee(next->region_number() == region_idx, "region number must match");
2174   }
2175   return next;
2176 }
2177 
2178 void ShenandoahHeap::set_region_in_collection_set(size_t region_index, bool b) {
2179   _in_cset_fast_test_base[region_index] = b;
2180 }
2181 
2182 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2183   return _monitoring_support;
2184 }
2185 
2186 CMBitMap* ShenandoahHeap::complete_mark_bit_map() {
2187   return _complete_mark_bit_map;
2188 }
2189 
2190 CMBitMap* ShenandoahHeap::next_mark_bit_map() {
2191   return _next_mark_bit_map;
2192 }
2193 
2194 void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
2195   _free_regions->add_region(r);
2196 }
2197 
2198 void ShenandoahHeap::clear_free_regions() {
2199   _free_regions->clear();
2200 }
2201 
2202 address ShenandoahHeap::in_cset_fast_test_addr() {
2203   return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
2204 }
2205 
2206 address ShenandoahHeap::cancelled_concgc_addr() {
2207   return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
2208 }
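
// These raw addresses are exported so that code generated outside this file
// (e.g. interpreter or JIT barrier stubs) can test the in-collection-set
// table and the cancellation flag directly, without a call into the runtime.
// (The exact consumers are an assumption here; they are not visible in this
// file.)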
2209 
2210 void ShenandoahHeap::clear_cset_fast_test() {
2211   assert(_in_cset_fast_test_base != NULL, "sanity");
2212   memset(_in_cset_fast_test_base, false,
2213          _in_cset_fast_test_length * sizeof(bool));
2214 }
2215 
2216 size_t ShenandoahHeap::conservative_max_heap_alignment() {
2217   return ShenandoahMaxRegionSize;
2218 }
2219 
2220 size_t ShenandoahHeap::bytes_allocated_since_cm() {
2221   return _bytes_allocated_since_cm;
2222 }
2223 
2224 void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
2225   _bytes_allocated_since_cm = bytes;
2226 }
2227 
2228 size_t ShenandoahHeap::max_allocated_gc() {
2229   return _max_allocated_gc;
2230 }
2231 
2232 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2233   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2234   _next_top_at_mark_starts[index] = addr;
2235 }
2236 
2237 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2238   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2239   return _next_top_at_mark_starts[index];
2240 }
2241 
2242 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2243   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2244   _complete_top_at_mark_starts[index] = addr;
2245 }
2246 
2247 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2248   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2249   return _complete_top_at_mark_starts[index];
2250 }
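
// Note on the index arithmetic above: the slot is computed from the *absolute*
// region base address ((uintx) region_base >> RegionSizeShift), while
// grow_heap_by() fills the *_base arrays by plain region number. That only
// works if _next/_complete_top_at_mark_starts are biased copies of the *_base
// pointers, offset by heap_start >> RegionSizeShift. Worked example (purely
// illustrative): with 1 MB regions (shift 20) and heap start 0x80000000, a
// region base of 0x80200000 shifts to slot 0x802; a bias of -0x800 maps that
// to entry 2 of the underlying array, i.e. region number 2.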
2251 
2252 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2253   _full_gc_in_progress = in_progress;
2254 }
2255 
2256 bool ShenandoahHeap::is_full_gc_in_progress() const {
2257   return _full_gc_in_progress;
2258 }
2259 
2260 class NMethodOopInitializer : public OopClosure {
2261 private:
2262   ShenandoahHeap* _heap;
2263 public:
2264   NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
2265   }
2266 
2267 private:
2268   template <class T>
2269   inline void do_oop_work(T* p) {
2270     T o = oopDesc::load_heap_oop(p);
2271     if (! oopDesc::is_null(o)) {
2272       oop obj1 = oopDesc::decode_heap_oop_not_null(o);
2273       oop obj2 = oopDesc::bs()->write_barrier(obj1);
2274       if (! oopDesc::unsafe_equals(obj1, obj2)) {
2275         oopDesc::encode_store_heap_oop(p, obj2);
2276       }
2277     }
2278   }
2279 
2280 public:
2281   void do_oop(oop* o) {
2282     do_oop_work(o);
2283   }
2284   void do_oop(narrowOop* o) {
2285     do_oop_work(o);
2286   }
2287 };
2288 
2289 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2290   NMethodOopInitializer init;
2291   nm->oops_do(&init);
2292   nm->fix_oop_relocations();
2293 }
2294 
2295 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2296 }
2297 
2298 void ShenandoahHeap::pin_object(oop o) {
2299   heap_region_containing(o)->pin();
2300 }
2301 
2302 void ShenandoahHeap::unpin_object(oop o) {
2303   heap_region_containing(o)->unpin();
2304 }
2305 
2306 
2307 GCTimer* ShenandoahHeap::gc_timer() const {
2308   return _gc_timer;
2309 }
2310 
2311 class RecordAllRefsOopClosure: public ExtendedOopClosure {
private:
  int  _x;
  int* _matrix;
  int  _num_regions;
  oop  _p;

public:
  RecordAllRefsOopClosure(int* matrix, int x, size_t num_regions, oop p) :
    _x(x), _matrix(matrix), _num_regions((int) num_regions), _p(p) {}
2321 
2322   template <class T>
2323   void do_oop_work(T* p) {
2324     oop o = oopDesc::load_decode_heap_oop(p);
2325     if (o != NULL) {
2326       if (ShenandoahHeap::heap()->is_in(o) && o->is_oop() ) {
2327         int y = ShenandoahHeap::heap()->heap_region_containing(o)->region_number();
2328         _matrix[_x * _num_regions + y]++;
2329       }
2330     }
2331   }
2332   void do_oop(oop* p) {
2333     do_oop_work(p);
2334   }
2335 
2336   void do_oop(narrowOop* p) {
2337     do_oop_work(p);
2338   }
2339 
2340 };
2341 
2342 class RecordAllRefsObjectClosure : public ObjectClosure {
2343   int *_matrix;
2344   size_t _num_regions;
2345 
2346 public:
2347   RecordAllRefsObjectClosure(int *matrix, size_t num_regions) :
2348     _matrix(matrix), _num_regions(num_regions) {}
2349 
2350   void do_object(oop p) {
2351     if (ShenandoahHeap::heap()->is_in(p) && ShenandoahHeap::heap()->is_marked_next(p)  && p->is_oop()) {
2352       int x = ShenandoahHeap::heap()->heap_region_containing(p)->region_number();
2353       RecordAllRefsOopClosure cl(_matrix, x, _num_regions, p);
2354       p->oop_iterate(&cl);
2355     }
2356   }
};

void ShenandoahHeap::calculate_matrix(int* connections) {
  log_develop_trace(gc)("calculating matrix");
  ensure_parsability(false);
  int num = (int) num_regions();
2362 
2363   for (int i = 0; i < num; i++) {
2364     for (int j = 0; j < num; j++) {
2365       connections[i * num + j] = 0;
2366     }
2367   }
2368 
2369   RecordAllRefsOopClosure cl(connections, 0, num, NULL);
2370   roots_iterate(&cl);
2371 
2372   RecordAllRefsObjectClosure cl2(connections, num);
2373   object_iterate(&cl2);
2374 
2375 }
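
// Worked example (illustrative): with num == 3, an object residing in region 0
// that references an object in region 2 increments connections[0 * 3 + 2].
// Row i thus counts outgoing references from region i, and column j counts
// incoming references into region j.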
2376 
2377 void ShenandoahHeap::print_matrix(int* connections) {
  int num = (int) num_regions();
2379   int cs_regions = 0;
2380   int referenced = 0;
2381 
  for (int i = 0; i < num; i++) {
    int numReferencedRegions = 0;
    int numReferencedByRegions = 0;

    for (int j = 0; j < num; j++) {
      if (connections[i * num + j] > 0)
        numReferencedRegions++;

      if (connections[j * num + i] > 0)
        numReferencedByRegions++;
    }

    cs_regions++;
    referenced += numReferencedByRegions;
2398 
2399     if (ShenandoahHeap::heap()->regions()->get(i)->has_live()) {
2400       tty->print("Region %d is referenced by %d regions {",
2401                  i, numReferencedByRegions);
      int col_count = 0;
      for (int j = 0; j < num; j++) {
        int count = connections[j * num + i];
        if (count > 0) {
          col_count++;
          if ((col_count % 10) == 0)
            tty->print("\n");
          tty->print("%d(%d), ", j, count);
        }
      }
      tty->print("}\n");
2413     }
2414   }
2415 
  double avg = (double) referenced / (double) cs_regions;
  tty->print_cr("Average number of regions scanned per region = %f", avg);
2418 }
2419 
2420 class ShenandoahCountGarbageClosure : public ShenandoahHeapRegionClosure {
2421 private:
2422   size_t _garbage;
2423 public:
2424   ShenandoahCountGarbageClosure() : _garbage(0) {
2425   }
2426 
2427   bool doHeapRegion(ShenandoahHeapRegion* r) {
2428     if (! r->is_humongous() && ! r->is_pinned() && ! r->in_collection_set()) {
2429       _garbage += r->garbage();
2430     }
2431     return false;
2432   }
2433 
2434   size_t garbage() {
2435     return _garbage;
2436   }
2437 };
2438 
2439 size_t ShenandoahHeap::garbage() {
2440   ShenandoahCountGarbageClosure cl;
2441   heap_region_iterate(&cl);
2442   return cl.garbage();
2443 }
2444 
2445 #ifdef ASSERT
2446 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2447   assert(_heap_lock == locked, "must be locked");
2448   assert(_heap_lock_owner == Thread::current(), "must be owned by current thread");
2449 }
2450 
2451 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2452   Thread* thr = Thread::current();
  assert((_heap_lock == locked && _heap_lock_owner == thr) ||
         (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()),
         "must own heap lock, or be the VM thread at a safepoint");
2456 }
2457 
2458 #endif