/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "memory/allocation.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  for (HeapWord* cur = start; cur < end; cur++) {
    tty->print_cr(PTR_FORMAT " : " PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  }
}

class PrintHeapRegionsClosure : public ShenandoahHeapRegionClosure {
private:
  outputStream* _st;
public:
  PrintHeapRegionsClosure() : _st(tty) {}
  PrintHeapRegionsClosure(outputStream* st) : _st(st) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->print_on(_st);
    return false;
  }
};

class ShenandoahPretouchTask : public AbstractGangTask {
private:
  char* volatile _cur_addr;
  char* const _start_addr;
  char* const _end_addr;
  size_t const _page_size;
public:
  ShenandoahPretouchTask(char* start_address, char* end_address, size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _cur_addr(start_address),
    _start_addr(start_address),
    _end_addr(end_address),
    _page_size(page_size) {
  }

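  // Workers claim fixed-size chunks of [_start_addr, _end_addr) by atomically
  // bumping _cur_addr; each claimed chunk is then touched page by page. No two
  // workers ever touch the same chunk, and a worker drops out once the cursor
  // has advanced past the end of the range.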
  virtual void work(uint worker_id) {
    size_t const actual_chunk_size = MAX2(PreTouchParallelChunkSize, _page_size);
    while (true) {
      char* touch_addr = (char*)Atomic::add_ptr((intptr_t)actual_chunk_size, (volatile void*) &_cur_addr) - actual_chunk_size;
      if (touch_addr < _start_addr || touch_addr >= _end_addr) {
        break;
      }
      char* end_addr = touch_addr + MIN2(actual_chunk_size, pointer_delta(_end_addr, touch_addr, sizeof(char)));
      os::pretouch_memory(touch_addr, end_addr, _page_size);
    }
  }
};

void ShenandoahHeap::pretouch_storage(char* start, char* end, WorkGang* workers) {
  assert(ShenandoahAlwaysPreTouch, "Sanity");
  assert(!AlwaysPreTouch, "Should have been overridden");

  size_t size = (size_t)(end - start);
  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t num_chunks = MAX2((size_t)1, size / MAX2(PreTouchParallelChunkSize, page_size));
  uint num_workers = MIN2((uint)num_chunks, workers->active_workers());

  log_info(gc, heap)("Parallel pretouch with %u workers for " SIZE_FORMAT " work units pre-touching " SIZE_FORMAT " bytes.",
                     num_workers, num_chunks, size);

  ShenandoahPretouchTask cl(start, end, page_size);
  workers->run_task(&cl, num_workers);
}

jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 Arguments::conservative_max_heap_alignment());
  initialize_reserved_region((HeapWord*) heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);
  if (ShenandoahAlwaysPreTouch) {
    pretouch_storage(_storage.low(), _storage.high(), _workers);
  }

  _num_regions = init_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _max_regions = max_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _initialSize = _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
  size_t regionSizeWords = ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
  _sorted_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahCollectionSet(_max_regions);
  _free_regions = new ShenandoahFreeSet(_max_regions);

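  // The arrays below are indexed by region number, but the stored pointers are
  // biased by the heap base so that a slot can be looked up directly from a
  // heap address with a single shift: array[(uintx)addr >> RegionSizeShift]
  // lands on the entry for the region containing addr, without subtracting
  // the heap base first.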
  // Initialize fast collection set test structure.
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
                   NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length, mtGC);
  _in_cset_fast_test = _in_cset_fast_test_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  _next_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  _complete_top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  for (size_t i = 0; i < _num_regions; i++) {
    _in_cset_fast_test_base[i] = false; // Not in cset
    HeapWord* bottom = (HeapWord*) pgc_rs.base() + regionSizeWords * i;
    _complete_top_at_mark_starts_base[i] = bottom;
    _next_top_at_mark_starts_base[i] = bottom;
  }

  for (size_t i = 0; i < _num_regions; i++) {
    ShenandoahHeapRegion* current = new ShenandoahHeapRegion();
    current->initialize_heap_region(this, (HeapWord*) pgc_rs.base() +
                                    regionSizeWords * i, regionSizeWords, i);
    _free_regions->add_region(current);
    _ordered_regions->add_region(current);
    _sorted_regions->add_region(current);
  }
  assert(((size_t) _ordered_regions->active_regions()) == _num_regions,
         "all regions must be active after initialization");
  _first_region = _ordered_regions->get(0);
  _first_region_bottom = _first_region->bottom();
  assert((((size_t) _first_region_bottom) &
          (ShenandoahHeapRegion::RegionSizeBytes - 1)) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(_first_region_bottom));

  _numAllocs = 0;

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    log_trace(gc, region)("All Regions");
    _ordered_regions->print(out);
    log_trace(gc, region)("Free Regions");
    _free_regions->print(out);
  }

  // The call below uses stuff (the SATB* things) that are in G1, but probably
  // belongs in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /* G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

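  // Two mark bitmaps are kept over the whole heap: the "complete" bitmap holds
  // the result of the most recently finished marking, while the "next" bitmap
  // is the one built by the in-flight marking cycle. Their roles alternate
  // between cycles, so only the next bitmap needs clearing before marking.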
  // Reserve space for prev and next bitmap.
  size_t bitmap_size = CMBitMap::compute_size(heap_rs.size());
  MemRegion heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap0(bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);
  _mark_bit_map0.initialize(heap_region, bitmap_region0);
  _complete_mark_bit_map = &_mark_bit_map0;

  ReservedSpace bitmap1(bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);
  _mark_bit_map1.initialize(heap_region, bitmap_region1);
  _next_mark_bit_map = &_mark_bit_map1;

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  return JNI_OK;
}

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _max_allocated_gc(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_conc_workers((int) MAX2((uint) ConcGCThreads, 1U)),
  _max_parallel_workers((int) MAX2((uint) ParallelGCThreads, 1U)),
  _ref_processor(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _cancelled_concgc(false),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _growing_heap(0),
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
{
  log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: " UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  // This is odd. They are concurrent gc threads, but they are also task threads.
  // The framework doesn't allow both.
  _workers = new WorkGang("Parallel GC Threads", ParallelGCThreads,
                          /* are_GC_task_threads */ true,
                          /* are_ConcurrentGC_threads */ false);
  _conc_workers = new WorkGang("Concurrent GC Threads", ConcGCThreads,
                               /* are_GC_task_threads */ true,
                               /* are_ConcurrentGC_threads */ false);
  if ((_workers == NULL) || (_conc_workers == NULL)) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
    _conc_workers->initialize_workers();
  }
}

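// The mark bitmaps only carry information for addresses below the
// corresponding top-at-mark-start (TAMS) of each region, so resetting a
// bitmap only needs to clear the range [bottom, TAMS) rather than the
// whole region.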
class ResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->next_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->next_mark_bit_map()->clear_range(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
  GCTraceTime(Info, gc, phases) time("Concurrent reset bitmaps", gc_timer(), GCCause::_no_gc);

  ResetNextBitmapTask task(_ordered_regions);
  workers->run_task(&task);
}

class ResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
      if (top > bottom) {
        heap->complete_mark_bit_map()->clear_range(MemRegion(bottom, top));
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
  GCTraceTime(Info, gc, phases) time("Concurrent reset bitmaps", gc_timer(), GCCause::_no_gc);

  ResetCompleteBitmapTask task(_ordered_regions);
  workers->run_task(&task);
}

bool ShenandoahHeap::is_next_bitmap_clear() {
  HeapWord* start = _ordered_regions->bottom();
  HeapWord* end = _ordered_regions->end();
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity() / K, used() / K);
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::RegionSizeBytes / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (cancelled_concgc()) {
    st->print("cancelled ");
  }
  st->cr();

  if (Verbose) {
    print_heap_regions(st);
  }
}

class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    InitGCLABClosure init_gclabs;
    for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
      init_gclabs.do_thread(thread);
    }
    gc_threads_do(&init_gclabs);
  }

  _max_workers = MAX2(_max_parallel_workers, _max_conc_workers);
  _scm->initialize(_max_workers);

  ref_processing_init();
}

class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
  size_t sum;
public:
  CalculateUsedRegionClosure() : sum(0) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    sum = sum + r->used();
    return false;
  }

  size_t getResult() { return sum; }
};

size_t ShenandoahHeap::calculateUsed() {
  CalculateUsedRegionClosure cl;
  heap_region_iterate(&cl);
  return cl.getResult();
}

void ShenandoahHeap::verify_heap_size_consistency() {
  assert(calculateUsed() == used(),
         "heap used size must be consistent: heap-used: " SIZE_FORMAT " regions-used: " SIZE_FORMAT,
         used(), calculateUsed());
}

size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(bytes, &_used);
}

void ShenandoahHeap::set_used(size_t bytes) {
  _used = bytes;
  OrderAccess::release();
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(_used >= bytes, "never decrease used by more than has been allocated");
  Atomic::add(-bytes, &_used);
}

size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

size_t ShenandoahHeap::min_capacity() const {
  return _initialSize;
}

VirtualSpace* ShenandoahHeap::storage() const {
  return (VirtualSpace*) &_storage;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* first_region_bottom = _first_region->bottom();
  HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * _num_regions;
  return p >= first_region_bottom && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

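// Slow path for GCLAB allocation, taken when the thread's local GCLAB cannot
// satisfy the request: either the current GCLAB is retained (if it still has
// too much free space to discard) and the object goes to shared space, or the
// GCLAB is retired and a fresh one is allocated.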
HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
  return allocate_new_tlab(word_size, false);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
  return allocate_new_tlab(word_size, true);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory(word_size, evacuating);

  if (result != NULL) {
    assert(!in_collection_set(result), "Never allocate in dirty region");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size " SIZE_FORMAT " at addr " PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory_work(word_size);

  if (result == NULL) {
    bool retry;
    do {
      // Try to grow the heap.
      retry = check_grow_heap();
      result = allocate_memory_work(word_size);
    } while (retry && result == NULL);
  }

  if (result == NULL && !evacuating) { // Allocation failed, try full-GC, then retry allocation.
    log_develop_trace(gc)("Failed to allocate " SIZE_FORMAT " bytes", word_size * HeapWordSize);
    collect(GCCause::_allocation_failure);
    result = allocate_memory_work(word_size);
  }

  // Only update monitoring counters when not calling from a write-barrier.
  // Otherwise we might attempt to grab the Service_lock, which we must
  // not do when coming from a write-barrier (because the thread might
  // already hold the Compile_lock).
  if (!evacuating) {
    monitoring_support()->update_counters();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size " SIZE_FORMAT " at addr " PTR_FORMAT " by thread %d ",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}

bool ShenandoahHeap::call_from_write_barrier(bool evacuating) {
  return evacuating && Thread::current()->is_Java_thread();
}

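// Heap growth is guarded by a CAS on _growing_heap so that only one thread
// actually commits new regions at a time; other allocating threads yield and
// retry, living off the already-committed reserve in the meantime.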
bool ShenandoahHeap::check_grow_heap() {
  assert(_free_regions->max_regions() >= _free_regions->active_regions(), "don't get negative");

  size_t available = _max_regions - _num_regions;
  if (available == 0) {
    return false; // Don't retry.
  }

  jbyte growing = Atomic::cmpxchg(1, &_growing_heap, 0);
  if (growing == 0) {
    // Only one thread succeeds this, and this one gets
    // to grow the heap. All other threads can continue
    // to allocate from the reserve.
    grow_heap_by(MIN2(available, ShenandoahAllocReserveRegions));

    // Reset it back to 0, so that other threads can take it again.
    Atomic::store(0, &_growing_heap);
    return true;
  } else {
    // Let other threads work, then try again.
    os::naked_yield();
    return true;
  }
}

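// Regular (non-humongous) allocations bump-allocate from the free set's
// current region; when that region fills up, the allocating thread claims the
// next free region and retries. Requests larger than one region are routed to
// allocate_large_memory() instead.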
HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
  if (word_size * HeapWordSize > ShenandoahHeapRegion::RegionSizeBytes) {
    return allocate_large_memory(word_size);
  }

  // Coming out of a full GC, it is possible that there is no free region
  // available, so the current index may not be valid. Bail out early when
  // the free set cannot possibly satisfy the request.
  if (word_size * HeapWordSize > _free_regions->capacity()) return NULL;

  size_t current_idx = _free_regions->current_index();
  ShenandoahHeapRegion* my_current_region = _free_regions->get(current_idx);

  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }

#ifdef ASSERT
  if (in_collection_set(my_current_region)) {
    print_heap_regions();
  }
#endif
  assert(!in_collection_set(my_current_region), "never get targeted regions in free-lists");
  assert(!my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result = my_current_region->par_allocate(word_size);

  while (result == NULL) {
    // 2nd attempt. Try next region.
    current_idx = _free_regions->par_claim_next(current_idx);
    my_current_region = _free_regions->get(current_idx);

    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    assert(!in_collection_set(my_current_region), "never get targeted regions in free-lists");
    assert(!my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->par_allocate(word_size);
  }

  my_current_region->increase_live_data(word_size * HeapWordSize);
  increase_used(word_size * HeapWordSize);
  _free_regions->increase_used(word_size * HeapWordSize);
  return result;
}

HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
  uint required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
  if (required_regions > _max_regions) return NULL;

  ShenandoahHeapRegion* r = _free_regions->claim_contiguous(required_regions);

  HeapWord* result = NULL;

  if (r != NULL) {
    result = r->bottom();

    log_debug(gc, humongous)("allocating humongous object of size: " SIZE_FORMAT " KB at location " PTR_FORMAT " in start region " SIZE_FORMAT,
                             (words * HeapWordSize) / K, p2i(result), r->region_number());
  } else {
    log_debug(gc, humongous)("allocating humongous object of size: " SIZE_FORMAT " KB failed",
                             (words * HeapWordSize) / K);
  }

  return result;
}

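// Every heap object is preceded by a Brooks-style forwarding pointer word.
// mem_allocate() therefore requests BrooksPointer::word_size() extra words,
// returns the address just past that word, and initializes the forwarding
// pointer to refer back to the object itself until it gets evacuated.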
HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
#ifdef ASSERT
  if (ShenandoahVerify && _numAllocs > 1000000) {
    _numAllocs = 0;
  }
  _numAllocs++;
#endif
  HeapWord* filler = allocate_memory(BrooksPointer::word_size() + size, false);
  if (filler != NULL) {
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(!in_collection_set(result), "never allocate in targeted region");
    return result;
  } else {
    return NULL;
  }
}

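// Evacuates the live objects of one region. Only objects whose forwarding
// pointer still refers to themselves (i.e. not yet copied by another thread)
// are evacuated; already-forwarded objects are skipped.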
class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {
  }

  void do_object(oop p) {
    log_develop_trace(gc, compaction)("Calling ParallelEvacuateRegionObjectClosure on " PTR_FORMAT " of size %d",
                                      p2i((HeapWord*) p), p->size());

    assert(_heap->is_marked_complete(p), "expect only marked objects");
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

#ifdef ASSERT
class VerifyEvacuatedObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_complete(p)) {
      oop p_prime = oopDesc::bs()->read_barrier(p);
      assert(!oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
      assert(p->klass() == p_prime->klass(), "Should have the same class p: " PTR_FORMAT ", p_prime: " PTR_FORMAT, p2i(p), p2i(p_prime));
      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "One forward once");
    }
  }
};

void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
  VerifyEvacuatedObjectClosure verify_evacuation;
  marked_object_iterate(from_region, &verify_evacuation);
}
#endif

void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
  assert(from_region->get_live_data() > 0, "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

  marked_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  if (ShenandoahVerify && !cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
#endif
}

class ParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* _sh;
  ShenandoahCollectionSet* _cs;

public:
  ParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* from_hr = _cs->claim_next();

    while (from_hr != NULL) {
      log_develop_trace(gc, region)("Thread " UINT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
                                    worker_id,
                                    from_hr->region_number());

      assert(from_hr->get_live_data() > 0, "all-garbage regions are reclaimed early");
      _sh->parallel_evacuate_region(from_hr);

      if (_sh->cancelled_concgc()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, from_hr->region_number());
        break;
      }
      from_hr = _cs->claim_next();
    }
  }
};

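// After evacuation, regions in the collection set contain only garbage (all
// live objects have copies elsewhere) and can be recycled wholesale: their
// used bytes are given back to the heap and the regions rejoin the free set.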
class RecycleDirtyRegionsClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
  size_t _bytes_reclaimed;
public:
  RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()), _bytes_reclaimed(0) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(!_heap->cancelled_concgc(), "no recycling after cancelled marking");

    if (_heap->in_collection_set(r)) {
      log_develop_trace(gc, region)("Recycling region " SIZE_FORMAT ":", r->region_number());
      _heap->decrease_used(r->used());
      _bytes_reclaimed += r->used();
      r->recycle();
      _heap->free_regions()->add_region(r);
    }

    return false;
  }

  size_t bytes_reclaimed() { return _bytes_reclaimed; }
  void clear_bytes_reclaimed() { _bytes_reclaimed = 0; }
};

void ShenandoahHeap::recycle_dirty_regions() {
  RecycleDirtyRegionsClosure cl;

  heap_region_iterate(&cl);

  _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
  if (!cancelled_concgc()) {
    clear_cset_fast_test();
  }
}

ShenandoahFreeSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}

void ShenandoahHeap::print_heap_regions(outputStream* st) const {
  _ordered_regions->print(st);
}

class PrintAllRefsOopClosure : public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT " (marked: %s) (%s " PTR_FORMAT ")",
                      _prefix, _index,
                      p2i(p), p2i(o),
                      BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(o)),
                      o->klass()->internal_name(), p2i(o->klass()));
      } else {
        tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") dirty -> " PTR_FORMAT " (not in heap, possibly corrupted or dirty)",
                      _prefix, _index,
                      p2i(p), p2i(o));
      }
    } else {
      tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class PrintAllRefsObjectClosure : public ObjectClosure {
  const char* _prefix;

public:
  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p)) {
      tty->print_cr("%s object " PTR_FORMAT " (marked: %s) (%s " PTR_FORMAT ") refers to:",
                    _prefix, p2i(p),
                    BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_complete(p)),
                    p->klass()->internal_name(), p2i(p->klass()));
      PrintAllRefsOopClosure cl(_prefix);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::print_all_refs(const char* prefix) {
  tty->print_cr("printing all references in the heap");
  tty->print_cr("root references:");

  ensure_parsability(false);

  PrintAllRefsOopClosure cl(prefix);
  roots_iterate(&cl);

  tty->print_cr("heap references:");
  PrintAllRefsObjectClosure cl2(prefix);
  object_iterate(&cl2);
}

class VerifyAfterMarkingOopClosure : public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (!_heap->is_marked_complete(o)) {
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: " PTR_FORMAT ": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_complete(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr " PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (!oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
        tty->print_cr("oop has forwardee: p: " PTR_FORMAT " (%s), o = " PTR_FORMAT " (%s), new-o: " PTR_FORMAT " (%s)",
                      p2i(p),
                      BOOL_TO_STR(_heap->in_collection_set(p)),
                      p2i(o),
                      BOOL_TO_STR(_heap->in_collection_set(o)),
                      p2i((HeapWord*) oopDesc::bs()->read_barrier(o)),
                      BOOL_TO_STR(_heap->in_collection_set(oopDesc::bs()->read_barrier(o))));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
      assert(!_heap->in_collection_set(o), "references must not point to dirty heap regions");
      assert(_heap->is_marked_complete(o), "live oops must be marked current");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::verify_heap_after_marking() {
  verify_heap_size_consistency();

  log_trace(gc)("verifying heap after marking");

  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);
  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}

void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  uint required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  uint index = r->region_number();

  assert(r->get_live_data() == 0, "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (log_is_enabled(Debug, gc, humongous)) {
      log_debug(gc, humongous)("reclaiming " UINT32_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words",
                               required_regions, size);
      ResourceMark rm;
      outputStream* out = Log(gc, humongous)::debug_stream();
      region->print_on(out);
    }

    region->recycle();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::RegionSizeBytes);
  }
}

class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (!heap->is_marked_complete(humongous_obj)) {
        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

#ifdef ASSERT
class CheckCollectionSetClosure : public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(!ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");

  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {
    recycle_dirty_regions();

    ensure_parsability(true);

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }
#endif

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.

    _collection_set->clear();
    _free_regions->clear();

    ShenandoahReclaimHumongousRegionsClosure reclaim;
    heap_region_iterate(&reclaim);

#ifdef ASSERT
    CheckCollectionSetClosure ccsc;
    _ordered_regions->heap_region_iterate(&ccsc);
#endif

    _shenandoah_policy->choose_collection_set(_collection_set);

    _shenandoah_policy->choose_free_set(_free_regions);

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}

class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);

    RetireTLABClosure cl(retire_tlabs);
    for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
      cl.do_thread(thread);
    }
    gc_threads_do(&cl);
  }
}

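// Roots that point into the collection set are handled at a safepoint: the
// referenced object is evacuated (unless another thread already copied it)
// and the root slot is updated in place to point at the new copy.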
class ShenandoahEvacuateUpdateRootsClosure : public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp) {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(false);
  }

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    ShenandoahRootEvacuator rp(this, _max_parallel_workers, ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(true);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

void ShenandoahHeap::do_evacuation() {
  parallel_evacuate();

  if (ShenandoahVerify && !cancelled_concgc()) {
    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
    if (Thread::current()->is_VM_thread()) {
      verify_after_evacuation.doit();
    } else {
      VMThread::execute(&verify_after_evacuation);
    }
  }
}

void ShenandoahHeap::parallel_evacuate() {
  if (!cancelled_concgc()) {
    log_develop_trace(gc)("starting parallel_evacuate");

    _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

    if (log_is_enabled(Trace, gc, region)) {
      ResourceMark rm;
      outputStream* out = Log(gc, region)::trace_stream();
      out->print_cr("Printing all available regions:");
      print_heap_regions(out);
    }

    if (log_is_enabled(Trace, gc, cset)) {
      ResourceMark rm;
      outputStream* out = Log(gc, cset)::trace_stream();
      out->print_cr("Printing collection set which contains " SIZE_FORMAT " regions:", _collection_set->count());
      _collection_set->print(out);

      out->print_cr("Printing free set which contains " SIZE_FORMAT " regions:", _free_regions->count());
      _free_regions->print(out);
    }

    ParallelEvacuationTask evacuationTask(this, _collection_set);

    conc_workers()->run_task(&evacuationTask);

    if (log_is_enabled(Trace, gc, cset)) {
      ResourceMark rm;
      outputStream* out = Log(gc, cset)::trace_stream();
      out->print_cr("Printing postgc collection set which contains " SIZE_FORMAT " regions:",
                    _collection_set->count());
      _collection_set->print(out);

      out->print_cr("Printing postgc free regions which contain " SIZE_FORMAT " free regions:",
                    _free_regions->count());
      _free_regions->print(out);
    }

    if (log_is_enabled(Trace, gc, region)) {
      ResourceMark rm;
      outputStream* out = Log(gc, region)::trace_stream();
      out->print_cr("all regions after evacuation:");
      print_heap_regions(out);
    }

    _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);

    if (cancelled_concgc()) {
      concurrent_thread()->schedule_full_gc();
    }
  }
}

class VerifyEvacuationClosure : public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  ShenandoahHeapRegion* _from_region;

public:
  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop heap_oop = oopDesc::load_decode_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      guarantee(!_from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: " PTR_FORMAT, p2i((HeapWord*) heap_oop));
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, /* fix_relocations */ false);
  CLDToOopClosure cldCl(cl);

  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahRootProcessor rp(this, 1);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
}

void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
  VerifyEvacuationClosure rootsCl(from_region);
  roots_iterate(&rootsCl);
}

bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}

size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread* thread) const {
  size_t idx = _free_regions->current_index();
  ShenandoahHeapRegion* current = _free_regions->get(idx);
  if (current == NULL) {
    return 0;
  } else if (current->free() > MinTLABSize) {
    return current->free();
  } else {
    return MinTLABSize;
  }
}

size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::RegionSizeBytes;
}

class ResizeGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().resize();
  }
};

void ShenandoahHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();

  ResizeGCLABClosure cl;
  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
    cl.do_thread(thread);
  }
  gc_threads_do(&cl);
}

class AccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().accumulate_statistics();
    thread->gclab().initialize_statistics();
  }
};

void ShenandoahHeap::accumulate_statistics_all_gclabs() {
  AccumulateStatisticsGCLABClosure cl;
  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
    cl.do_thread(thread);
  }
  gc_threads_do(&cl);
}

bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
  return true;
}

oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // Overridden to do nothing.
  return new_obj;
}

bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return true;
}

bool ShenandoahHeap::card_mark_must_follow_store() const {
  return false;
}

void ShenandoahHeap::collect(GCCause::Cause cause) {
  assert(cause != GCCause::_gc_locker, "no JNI critical callback");
  if (GCCause::is_user_requested_gc(cause)) {
    if (!DisableExplicitGC) {
      cancel_concgc(cause);
      _concurrent_gc_thread->do_full_gc(cause);
    }
  } else if (cause == GCCause::_allocation_failure) {
    cancel_concgc(cause);
    collector_policy()->set_should_clear_all_soft_refs(true);
    _concurrent_gc_thread->do_full_gc(cause);
  }
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  // assert(false, "Shouldn't need to do full collections");
}

AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;
}

CollectorPolicy* ShenandoahHeap::collector_policy() const {
  return _shenandoah_policy;
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  Space* sp = heap_region_containing(addr);
  if (sp != NULL) {
    return sp->block_start(addr);
  }
  return NULL;
}

size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  return sp->block_is_obj(addr);
}

jlong ShenandoahHeap::millis_since_last_gc() {
  return 0;
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || !UseTLAB) {
    ensure_parsability(false);
  }
}

void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
  workers()->print_worker_threads_on(st);
  conc_workers()->print_worker_threads_on(st);
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
  conc_workers()->threads_do(tcl);
}

void ShenandoahHeap::print_tracing_info() const {
  if (log_is_enabled(Info, gc, stats)) {
    ResourceMark rm;
    outputStream* out = Log(gc, stats)::info_stream();
    _shenandoah_policy->print_tracing_info(out);
  }
}

1530 class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
1531 private:
1532   ShenandoahHeap*  _heap;
1533   VerifyOption     _vo;
1534   bool             _failures;
1535 public:
1536   // _vo == UsePrevMarking -> use "prev" marking information,
1537   // _vo == UseNextMarking -> use "next" marking information,
1538   // _vo == UseMarkWord    -> use mark word from object header.
1539   ShenandoahVerifyRootsClosure(VerifyOption vo) :
1540     _heap(ShenandoahHeap::heap()),
1541     _vo(vo),
1542     _failures(false) { }
1543 
1544   bool failures() { return _failures; }
1545 
1546 private:
1547   template <class T>
1548   inline void do_oop_work(T* p) {
1549     oop obj = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
      // Record the failure and print some context before the guarantee below fires.
      _failures = true;
      tty->print_cr("Root location "PTR_FORMAT" contains broken oop "PTR_FORMAT,
                    p2i(p), p2i((void*) obj));
    }
    guarantee(obj->is_oop_or_null(), "is oop or null");
1558   }
1559 
1560 public:
1561   void do_oop(oop* p)       {
1562     do_oop_work(p);
1563   }
1564 
1565   void do_oop(narrowOop* p) {
1566     do_oop_work(p);
1567   }
};
1570 
1571 class ShenandoahVerifyHeapClosure: public ObjectClosure {
1572 private:
1573   ShenandoahVerifyRootsClosure _rootsCl;
1574 public:
  ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
    _rootsCl(rc) {}
1577 
1578   void do_object(oop p) {
1579     _rootsCl.do_oop(&p);
1580   }
1581 };
1582 
1583 class ShenandoahVerifyKlassClosure: public KlassClosure {
1584   OopClosure *_oop_closure;
1585  public:
1586   ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {}
1587   void do_klass(Klass* k) {
1588     k->oops_do(_oop_closure);
1589   }
1590 };
1591 
1592 void ShenandoahHeap::verify(VerifyOption vo) {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    ShenandoahVerifyRootsClosure rootsCl(vo);
1596 
1597     assert(Thread::current()->is_VM_thread(),
1598            "Expected to be executed serially by the VM thread at this point");
1599 
1600     roots_iterate(&rootsCl);
1601 
1602     bool failures = rootsCl.failures();
1603     log_trace(gc)("verify failures: %s", BOOL_TO_STR(failures));
1604 
1605     ShenandoahVerifyHeapClosure heapCl(rootsCl);
1606 
1607     object_iterate(&heapCl);
1608     // TODO: Implement rest of it.
1609   } else {
1610     tty->print("(SKIPPING roots, heapRegions, remset) ");
1611   }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1614   return _free_regions->capacity();
1615 }
1616 
1617 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
1618   ObjectClosure* _cl;
1619 public:
1620   ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1621   bool doHeapRegion(ShenandoahHeapRegion* r) {
1622     ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
1623     return false;
1624   }
1625 };
1626 
1627 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1628   ShenandoahIterateObjectClosureRegionClosure blk(cl);
1629   heap_region_iterate(&blk, false, true);
1630 }
1631 
1632 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1633   Unimplemented();
1634 }
1635 
// Apply blk->doHeapRegion() on all committed regions in address order,
// terminating the iteration early if doHeapRegion() returns true.
// skip_dirty_regions skips regions in the collection set, and
// skip_humongous_continuation skips humongous continuation regions.
void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
1639   for (size_t i = 0; i < _num_regions; i++) {
1640     ShenandoahHeapRegion* current  = _ordered_regions->get(i);
1641     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1642       continue;
1643     }
1644     if (skip_dirty_regions && in_collection_set(current)) {
1645       continue;
1646     }
1647     if (blk->doHeapRegion(current)) {
1648       return;
1649     }
1650   }
1651 }
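
// For illustration only (not part of the heap API): a closure that counts
// humongous regions; returning true from doHeapRegion() would stop the walk
// early.
//
//   class CountHumongousClosure : public ShenandoahHeapRegionClosure {
//     size_t _count;
//   public:
//     CountHumongousClosure() : _count(0) {}
//     bool doHeapRegion(ShenandoahHeapRegion* r) {
//       if (r->is_humongous()) _count++;
//       return false; // false: keep iterating over the remaining regions
//     }
//     size_t count() const { return _count; }
//   };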
1652 
1653 class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
1654   ShenandoahHeap* sh;
1655 public:
1656   ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
1657 
1658   bool doHeapRegion(ShenandoahHeapRegion* r) {
1659     r->clear_live_data();
1660     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1661     return false;
1662   }
1663 };
1664 
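// Kick off a concurrent marking cycle: accumulate TLAB statistics, retire
// TLABs so the heap is parsable for marking, reset per-region liveness and
// the 'next' top-at-mark-start pointers, then scan the GC roots to seed the
// marking task queues.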
void ShenandoahHeap::start_concurrent_marking() {
  shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
1669   accumulate_statistics_all_tlabs();
1670   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);
1671 
1672   set_concurrent_mark_in_progress(true);
1673   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1674   if (UseTLAB) {
1675     shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
1676     ensure_parsability(true);
1677     shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
1678   }
1679 
1680   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1681   _used_start_gc = used();
1682 
1683 #ifdef ASSERT
1684   if (ShenandoahDumpHeapBeforeConcurrentMark) {
1685     ensure_parsability(false);
1686     print_all_refs("pre-mark");
1687   }
1688 #endif
1689 
1690   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
1691   ClearLivenessClosure clc(this);
1692   heap_region_iterate(&clc);
1693   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);
1694 
1699   // Make above changes visible to worker threads
1700   OrderAccess::fence();
1701 
1702   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
1703   concurrentMark()->init_mark_roots();
1704   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);
1707 }
1708 
class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
private:
  ShenandoahHeap* _sh;

public:
  VerifyAfterEvacuationClosure() : _sh(ShenandoahHeap::heap()) {}
1715 
1716   template<class T> void do_oop_nv(T* p) {
1717     T heap_oop = oopDesc::load_heap_oop(p);
1718     if (!oopDesc::is_null(heap_oop)) {
1719       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1720       guarantee(_sh->in_collection_set(obj) == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1721                 "forwarded objects can only exist in dirty (from-space) regions is_dirty: %s, is_forwarded: %s obj-klass: %s, marked: %s",
1722                 BOOL_TO_STR(_sh->in_collection_set(obj)),
1723                 BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1724                 obj->klass()->external_name(),
1725                 BOOL_TO_STR(_sh->is_marked_complete(obj))
1726                 );
1727       obj = oopDesc::bs()->read_barrier(obj);
1728       guarantee(! _sh->in_collection_set(obj), "forwarded oops must not point to dirty regions");
1729       guarantee(obj->is_oop(), "is_oop");
1730       guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
1731     }
1732   }
1733 
1734   void do_oop(oop* p)       { do_oop_nv(p); }
1735   void do_oop(narrowOop* p) { do_oop_nv(p); }
};
1738 
void ShenandoahHeap::verify_heap_after_evacuation() {
  verify_heap_size_consistency();

  ensure_parsability(false);

  VerifyAfterEvacuationClosure cl;
  roots_iterate(&cl);

  ObjectToOopClosure objs(&cl);
  object_iterate(&objs);
}
1752 
1753 class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
1754 public:
1755   bool doHeapRegion(ShenandoahHeapRegion* r) {
1756     assert(! ShenandoahHeap::heap()->in_collection_set(r), "no region must be in collection set");
1757     return false;
1758   }
1759 };
1760 
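// Flip the 'next' and 'complete' marking state: after a successful marking
// cycle the freshly built 'next' bitmap and top-at-mark-start pointers become
// the authoritative 'complete' ones, and the old 'complete' structures are
// recycled for the next cycle.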
1761 void ShenandoahHeap::swap_mark_bitmaps() {
1762   // Swap bitmaps.
1763   CMBitMap* tmp1 = _complete_mark_bit_map;
1764   _complete_mark_bit_map = _next_mark_bit_map;
1765   _next_mark_bit_map = tmp1;
1766 
1767   // Swap top-at-mark-start pointers
1768   HeapWord** tmp2 = _complete_top_at_mark_starts;
1769   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1770   _next_top_at_mark_starts = tmp2;
1771 
1772   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1773   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1774   _next_top_at_mark_starts_base = tmp3;
1775 }
1776 
1777 void ShenandoahHeap::stop_concurrent_marking() {
1778   assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // Marking completed without cancellation: publish the marking results by
    // swapping the bitmaps, and clear the update-refs request. If marking was
    // cancelled, the flag is kept so that references get fixed up later.
    set_need_update_refs(false);
    swap_mark_bitmaps();
  }
1785   set_concurrent_mark_in_progress(false);
1786 
1787   if (log_is_enabled(Trace, gc, region)) {
1788     ResourceMark rm;
1789     outputStream* out = Log(gc, region)::trace_stream();
1790     print_heap_regions(out);
  }
}
1794 
1795 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1796   _concurrent_mark_in_progress = in_progress ? 1 : 0;
1797   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1798 }
1799 
1800 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1801   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
1802   _evacuation_in_progress = in_progress ? 1 : 0;
1803   OrderAccess::fence();
1804 }
1805 
void ShenandoahHeap::verify_copy(oop p, oop c) {
  assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
  assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
  if (p->klass() != c->klass()) {
    print_heap_regions();
  }
  assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
  assert(p->size() == c->size(), "verify size");
  // Object may have been locked between copy and verification.
  //    assert(p->mark() == c->mark(), "verify mark");
  assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
}
1818 
1819 void ShenandoahHeap::oom_during_evacuation() {
1820   log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
1821                         Thread::current()->osthread()->thread_id());
1822 
1823   // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
1824   collector_policy()->set_should_clear_all_soft_refs(true);
1825   concurrent_thread()->schedule_full_gc();
1826   cancel_concgc(_oom_evacuation);
1827 
1828   if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
1829     log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation finishes.");
1830     while (_evacuation_in_progress) { // wait.
1831       Thread::current()->_ParkEvent->park(1);
1832     }
  }
}
1836 
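// Brooks forwarding pointer support: every object is preceded by one extra
// word that holds the forwarding pointer, so a raw allocation looks like
//
//   | fwd pointer | object header | fields ... |
//   ^ allocation start            ^ the oop points here
//
// The helpers below skip over that word and initialize the forwarding
// pointer to point back at the object itself.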
1837 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1838   // Initialize Brooks pointer for the next object
1839   HeapWord* result = obj + BrooksPointer::word_size();
1840   BrooksPointer::initialize(oop(result));
1841   return result;
1842 }
1843 
1844 uint ShenandoahHeap::oop_extra_words() {
1845   return BrooksPointer::word_size();
1846 }
1847 
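// Expand the heap by num_regions regions: commit the backing storage first
// (ensure_new_regions), then create the new ShenandoahHeapRegion objects and
// publish them to the region sets and the free set.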
1848 void ShenandoahHeap::grow_heap_by(size_t num_regions) {
1849   size_t base = _num_regions;
1850   ensure_new_regions(num_regions);
1851 
  // Use a C-heap array; variable-length arrays are a non-standard extension.
  ShenandoahHeapRegion** regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, num_regions, mtGC);
1853   for (size_t i = 0; i < num_regions; i++) {
1854     ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion();
1855     size_t new_region_index = i + base;
1856     HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * new_region_index;
1857     new_region->initialize_heap_region(this, start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, new_region_index);
1858 
1859     if (log_is_enabled(Trace, gc, region)) {
1860       ResourceMark rm;
1861       outputStream* out = Log(gc, region)::trace_stream();
1862       out->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
1863       new_region->print_on(out);
1864     }
1865 
1866     assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
1867     _ordered_regions->add_region(new_region);
1868     _sorted_regions->add_region(new_region);
1869     _in_cset_fast_test_base[new_region_index] = false; // Not in cset
1870     _next_top_at_mark_starts_base[new_region_index] = new_region->bottom();
1871     _complete_top_at_mark_starts_base[new_region_index] = new_region->bottom();
1872 
1873     regions[i] = new_region;
1874   }
  _free_regions->par_add_regions(regions, 0, num_regions, num_regions);
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegion*, regions);
1876 }
1877 
void ShenandoahHeap::ensure_new_regions(size_t new_regions) {
  size_t num_regions = _num_regions;
  size_t new_num_regions = num_regions + new_regions;
  assert(new_num_regions <= _max_regions, "we checked this earlier");

  size_t expand_size = new_regions * ShenandoahHeapRegion::RegionSizeBytes;
  log_trace(gc, region)("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
  bool success = _storage.expand_by(expand_size, ShenandoahAlwaysPreTouch);
  assert(success, "should always be able to expand by requested size");

  _num_regions = new_num_regions;
}
1892 
1893 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1894   _heap(ShenandoahHeap::heap_no_check()) {
1895 }
1896 
1897 void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
1898   _heap = heap;
1899 }
1900 
bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
1904   obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
1905 #ifdef ASSERT
1906   if (_heap->concurrent_mark_in_progress()) {
1907     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
1908   }
1909 #endif
1910   assert(!oopDesc::is_null(obj), "null");
1911   return _heap->is_marked_next(obj);
1912 }
1913 
1914 void ShenandoahHeap::ref_processing_init() {
1915   MemRegion mr = reserved_region();
1916 
1917   isAlive.init(ShenandoahHeap::heap());
1918   assert(_max_workers > 0, "Sanity");
1919 
  _ref_processor =
    new ReferenceProcessor(mr,                      // span
                           ParallelRefProcEnabled,  // MT processing?
                           _max_workers,            // degree of MT processing
                           true,                    // MT discovery?
                           _max_workers,            // degree of MT discovery
                           false,                   // discovery is not atomic
                           &isAlive);               // is-alive closure
1933 }
1934 
1935 #ifdef ASSERT
1936 void ShenandoahHeap::set_from_region_protection(bool protect) {
  for (size_t i = 0; i < _num_regions; i++) {
1938     ShenandoahHeapRegion* region = _ordered_regions->get(i);
1939     if (region != NULL && in_collection_set(region)) {
1940       if (protect) {
1941         region->memProtectionOn();
1942       } else {
1943         region->memProtectionOff();
1944       }
1945     }
1946   }
1947 }
1948 #endif
1949 
1950 size_t ShenandoahHeap::num_regions() {
1951   return _num_regions;
1952 }
1953 
1954 size_t ShenandoahHeap::max_regions() {
1955   return _max_regions;
1956 }
1957 
1958 GCTracer* ShenandoahHeap::tracer() {
1959   return shenandoahPolicy()->tracer();
1960 }
1961 
1962 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1963   return _free_regions->used();
1964 }
1965 
1966 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
1967   if (try_cancel_concgc()) {
1968     log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
1969     _shenandoah_policy->report_concgc_cancelled();
1970   }
1971 }
1972 
1973 void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
1974   if (try_cancel_concgc()) {
1975     log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
1976     _shenandoah_policy->report_concgc_cancelled();
1977   }
1978 }
1979 
1980 const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
1981   switch (cause) {
1982     case _oom_evacuation:
1983       return "Out of memory for evacuation";
1984     case _vm_stop:
1985       return "Stopping VM";
1986     default:
1987       return "Unknown";
1988   }
1989 }
1990 
1991 void ShenandoahHeap::clear_cancelled_concgc() {
1992   set_cancelled_concgc(false);
1993 }
1994 
1995 uint ShenandoahHeap::max_workers() {
1996   return _max_workers;
1997 }
1998 
1999 uint ShenandoahHeap::max_parallel_workers() {
2000   return _max_parallel_workers;
}

uint ShenandoahHeap::max_conc_workers() {
2003   return _max_conc_workers;
2004 }
2005 
2006 void ShenandoahHeap::stop() {
2007   // We set this early here, to let GC threads terminate before we ask the concurrent thread
2008   // to terminate, which would otherwise block until all GC threads come to finish normally.
2009   set_cancelled_concgc(true);
2010   _concurrent_gc_thread->stop();
2011   cancel_concgc(_vm_stop);
2012 }
2013 
void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) {
  StringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols);
  workers()->run_task(&shenandoah_unlink_task);

  // TODO: Also unlink string deduplication tables here once string dedup is
  // supported (cf. G1StringDedup::unlink()).
}
2023 
2024 void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
2025   _need_update_refs = need_update_refs;
2026 }
2027 
// FIXME: This should live in the heap region set code.
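// Returns the region following r in the region sequence, skipping humongous
// regions: the full-GC compaction code never uses them as destinations.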
2029 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2030   size_t region_idx = r->region_number() + 1;
2031   ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
2032   guarantee(next->region_number() == region_idx, "region number must match");
2033   while (next->is_humongous()) {
2034     region_idx = next->region_number() + 1;
2035     next = _ordered_regions->get(region_idx);
2036     guarantee(next->region_number() == region_idx, "region number must match");
2037   }
2038   return next;
2039 }
2040 
2041 void ShenandoahHeap::set_region_in_collection_set(size_t region_index, bool b) {
2042   _in_cset_fast_test_base[region_index] = b;
2043 }
2044 
2045 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2046   return _monitoring_support;
2047 }
2048 
2049 CMBitMap* ShenandoahHeap::complete_mark_bit_map() {
2050   return _complete_mark_bit_map;
2051 }
2052 
2053 CMBitMap* ShenandoahHeap::next_mark_bit_map() {
2054   return _next_mark_bit_map;
2055 }
2056 
2057 void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
2058   _free_regions->add_region(r);
2059 }
2060 
2061 void ShenandoahHeap::clear_free_regions() {
2062   _free_regions->clear();
2063 }
2064 
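// The in-collection-set fast test is a bool-per-region table. Its raw address
// (and that of the cancellation flag) is exported so that compiler-generated
// barrier stubs can test it directly, without calling into the runtime.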
2065 address ShenandoahHeap::in_cset_fast_test_addr() {
2066   return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
2067 }
2068 
2069 address ShenandoahHeap::cancelled_concgc_addr() {
2070   return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
2071 }
2072 
2073 void ShenandoahHeap::clear_cset_fast_test() {
2074   assert(_in_cset_fast_test_base != NULL, "sanity");
  memset(_in_cset_fast_test_base, 0,
         _in_cset_fast_test_length * sizeof(bool));
2077 }
2078 
2079 size_t ShenandoahHeap::conservative_max_heap_alignment() {
2080   return HeapRegionBounds::max_size();
2081 }
2082 
2083 size_t ShenandoahHeap::bytes_allocated_since_cm() {
2084   return _bytes_allocated_since_cm;
2085 }
2086 
2087 void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
2088   _bytes_allocated_since_cm = bytes;
2089 }
2090 
2091 size_t ShenandoahHeap::max_allocated_gc() {
2092   return _max_allocated_gc;
2093 }
2094 
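// Top-at-mark-start (TAMS) accessors. Regions are RegionSizeBytes-aligned, so
// a region's index is simply its base address shifted right by
// RegionSizeShift. Objects allocated above TAMS after marking began are
// treated as implicitly live; only objects below TAMS need a mark bit.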
2095 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2096   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2097   _next_top_at_mark_starts[index] = addr;
2098 }
2099 
2100 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2101   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2102   return _next_top_at_mark_starts[index];
2103 }
2104 
2105 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2106   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2107   _complete_top_at_mark_starts[index] = addr;
2108 }
2109 
2110 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2111   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2112   return _complete_top_at_mark_starts[index];
2113 }
2114 
2115 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2116   _full_gc_in_progress = in_progress;
2117 }
2118 
2119 bool ShenandoahHeap::is_full_gc_in_progress() const {
2120   return _full_gc_in_progress;
2121 }
2122 
2123 class NMethodOopInitializer : public OopClosure {
2124 private:
2125   ShenandoahHeap* _heap;
2126 public:
  NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {}
2129 
2130 private:
2131   template <class T>
2132   inline void do_oop_work(T* p) {
2133     T o = oopDesc::load_heap_oop(p);
2134     if (! oopDesc::is_null(o)) {
2135       oop obj1 = oopDesc::decode_heap_oop_not_null(o);
2136       oop obj2 = oopDesc::bs()->write_barrier(obj1);
2137       if (! oopDesc::unsafe_equals(obj1, obj2)) {
2138         oopDesc::encode_store_heap_oop(p, obj2);
2139       }
2140     }
2141   }
2142 
2143 public:
2144   void do_oop(oop* o) {
2145     do_oop_work(o);
2146   }
2147   void do_oop(narrowOop* o) {
2148     do_oop_work(o);
2149   }
2150 };
2151 
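// On registration, run the write barrier over all oops embedded in the
// nmethod and patch the updated values back into the code stream, so that
// compiled code never embeds stale from-space references.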
2152 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2153   NMethodOopInitializer init;
2154   nm->oops_do(&init);
2155   nm->fix_oop_relocations();
2156 }
2157 
2158 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
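  // Nothing to do.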
2159 }
2160 
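// Object pinning (e.g. for JNI critical regions): a pinned region is excluded
// from the collection set, so pinned objects are guaranteed not to move.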
2161 void ShenandoahHeap::pin_object(oop o) {
2162   heap_region_containing(o)->pin();
2163 }
2164 
2165 void ShenandoahHeap::unpin_object(oop o) {
2166   heap_region_containing(o)->unpin();
2167 }
2168 
2170 GCTimer* ShenandoahHeap::gc_timer() const {
2171   return _gc_timer;
2172 }