1 /*
   2  * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "memory/allocation.hpp"
  25 #include "gc/g1/heapRegionBounds.inline.hpp"
  26 
  27 #include "gc/shared/gcTimer.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/parallelCleaning.hpp"
  30 
  31 #include "gc/shenandoah/brooksPointer.hpp"
  32 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  33 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  34 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  35 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  36 #include "gc/shenandoah/shenandoahConcurrentThread.hpp"
  37 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  39 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  40 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  41 #include "gc/shenandoah/shenandoahHumongous.hpp"
  42 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  43 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  44 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  45 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
  46 
  47 #include "runtime/vmThread.hpp"
  48 #include "services/mallocTracker.hpp"
  49 
  50 const char* ShenandoahHeap::name() const {
  51   return "Shenandoah";
  52 }
  53 
  54 void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  55   HeapWord* cur = NULL;
  56   for (cur = start; cur < end; cur++) {
  57     tty->print_cr(PTR_FORMAT" : "PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  58   }
  59 }
  60 
  61 class PrintHeapRegionsClosure : public
  62    ShenandoahHeapRegionClosure {
  63 private:
  64   outputStream* _st;
  65 public:
  66   PrintHeapRegionsClosure() : _st(tty) {}
  67   PrintHeapRegionsClosure(outputStream* st) : _st(st) {}
  68 
  69   bool doHeapRegion(ShenandoahHeapRegion* r) {
  70     r->print_on(_st);
  71     return false;
  72   }
  73 };
  74 
  75 class ShenandoahPretouchTask : public AbstractGangTask {
  76 private:
  77   char* volatile _cur_addr;
  78   char* const _start_addr;
  79   char* const _end_addr;
  80   size_t const _page_size;
  81 public:
  82   ShenandoahPretouchTask(char* start_address, char* end_address, size_t page_size) :
  83     AbstractGangTask("Shenandoah PreTouch",
  84                      Universe::is_fully_initialized() ? GCId::current_raw() :
  85                                                         // During VM initialization there is
  86                                                         // no GC cycle that this task can be
  87                                                         // associated with.
  88                                                         GCId::undefined()),
  89     _cur_addr(start_address),
  90     _start_addr(start_address),
  91     _end_addr(end_address),
  92     _page_size(page_size) {
  93   }
  94 
  95   virtual void work(uint worker_id) {
  96     size_t const actual_chunk_size = MAX2(PreTouchParallelChunkSize, _page_size);
  97     while (true) {
  98       char* touch_addr = (char*)Atomic::add_ptr((intptr_t)actual_chunk_size, (volatile void*) &_cur_addr) - actual_chunk_size;
  99       if (touch_addr < _start_addr || touch_addr >= _end_addr) {
 100         break;
 101       }
 102       char* end_addr = touch_addr + MIN2(actual_chunk_size, pointer_delta(_end_addr, touch_addr, sizeof(char)));
 103       os::pretouch_memory(touch_addr, end_addr, _page_size);
 104     }
 105   }
 106 };
 107 
 108 void ShenandoahHeap::pretouch_storage(char* start, char* end, WorkGang* workers) {
 109   assert (ShenandoahAlwaysPreTouch, "Sanity");
 110   assert (!AlwaysPreTouch, "Should have been overridden");
 111 
 112   size_t size = (size_t)(end - start);
 113   size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 114   size_t num_chunks = MAX2((size_t)1, size / MAX2(PreTouchParallelChunkSize, page_size));
 115   uint num_workers = MIN2((uint)num_chunks, workers->active_workers());
 116 
 117   log_info(gc, heap)("Parallel pretouch with %u workers for " SIZE_FORMAT " work units pre-touching " SIZE_FORMAT " bytes.",
 118                       num_workers, num_chunks, size);
 119 
 120   ShenandoahPretouchTask cl(start, end, page_size);
 121   workers->run_task(&cl, num_workers);
 122 }
 123 
 124 jint ShenandoahHeap::initialize() {
 125   CollectedHeap::pre_initialize();
 126 
 127   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 128   size_t max_byte_size = collector_policy()->max_heap_byte_size();
 129 
 130   Universe::check_alignment(max_byte_size,
 131                             ShenandoahHeapRegion::RegionSizeBytes,
 132                             "shenandoah heap");
 133   Universe::check_alignment(init_byte_size,
 134                             ShenandoahHeapRegion::RegionSizeBytes,
 135                             "shenandoah heap");
 136 
 137   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
 138                                                  Arguments::conservative_max_heap_alignment());
 139   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
 140 
 141   set_barrier_set(new ShenandoahBarrierSet(this));
 142   ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
 143   _storage.initialize(pgc_rs, init_byte_size);
 144   if (ShenandoahAlwaysPreTouch) {
 145     pretouch_storage(_storage.low(), _storage.high(), _workers);
 146   }
 147 
 148   _num_regions = init_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
 149   _max_regions = max_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
 150   _initialSize = _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
 151   size_t regionSizeWords = ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
 152   assert(init_byte_size == _initialSize, "tautology");
 153   _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
 154   _sorted_regions = new ShenandoahHeapRegionSet(_max_regions);
 155   _collection_set = new ShenandoahCollectionSet(_max_regions);
 156   _free_regions = new ShenandoahFreeSet(_max_regions);
 157 
 158   size_t i = 0;
 159   for (i = 0; i < _num_regions; i++) {
 160     ShenandoahHeapRegion* current = new ShenandoahHeapRegion();
 161     current->initialize_heap_region((HeapWord*) pgc_rs.base() +
 162                                     regionSizeWords * i, regionSizeWords, i);
 163     _free_regions->add_region(current);
 164     _ordered_regions->add_region(current);
 165     _sorted_regions->add_region(current);
 166   }
 167   assert(((size_t) _ordered_regions->active_regions()) == _num_regions, "");
 168   _first_region = _ordered_regions->get(0);
 169   _first_region_bottom = _first_region->bottom();
 170   assert((((size_t) _first_region_bottom) &
 171           (ShenandoahHeapRegion::RegionSizeBytes - 1)) == 0,
 172          "misaligned heap: "PTR_FORMAT, p2i(_first_region_bottom));
 173 
 174   _numAllocs = 0;
 175 
 176   if (log_is_enabled(Trace, gc, region)) {
 177     ResourceMark rm;
 178     outputStream* out = Log(gc, region)::trace_stream();
 179     log_trace(gc, region)("All Regions");
 180     _ordered_regions->print(out);
 181     log_trace(gc, region)("Free Regions");
 182     _free_regions->print(out);
 183   }
 184 
 185   // The call below uses stuff (the SATB* things) that are in G1, but probably
 186   // belong into a shared location.
 187   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
 188                                                SATB_Q_FL_lock,
 189                                                20 /*G1SATBProcessCompletedThreshold */,
 190                                                Shared_SATB_Q_lock);
 191 
 192   // Reserve space for prev and next bitmap.
 193   size_t bitmap_size = CMBitMap::compute_size(heap_rs.size());
 194   MemRegion heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
 195 
 196   size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 197 
 198   ReservedSpace bitmap0(bitmap_size, page_size);
 199   os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
 200   MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
 201   MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);
 202   _mark_bit_map0.initialize(heap_region, bitmap_region0);
 203   _prev_mark_bit_map = &_mark_bit_map0;
 204 
 205   ReservedSpace bitmap1(bitmap_size, page_size);
 206   os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
 207   MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
 208   MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);
 209   _mark_bit_map1.initialize(heap_region, bitmap_region1);
 210   _next_mark_bit_map = &_mark_bit_map1;
 211 
 212   // Initialize fast collection set test structure.
 213   _in_cset_fast_test_length = _max_regions;
 214   _in_cset_fast_test_base =
 215                    NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
 216   _in_cset_fast_test = _in_cset_fast_test_base -
 217                ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);
 218   clear_cset_fast_test();
 219 
 220   _top_at_mark_starts_base =
 221                    NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
 222   _top_at_mark_starts = _top_at_mark_starts_base -
 223                ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);
 224 
 225   for (i = 0; i < _num_regions; i++) {
 226     _in_cset_fast_test_base[i] = false; // Not in cset
 227     _top_at_mark_starts_base[i] = _ordered_regions->get(i)->bottom();
 228   }
 229 
 230   _monitoring_support = new ShenandoahMonitoringSupport(this);
 231 
 232   _concurrent_gc_thread = new ShenandoahConcurrentThread();
 233 
 234   ShenandoahMarkCompact::initialize();
 235 
 236   return JNI_OK;
 237 }
 238 
 239 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 240   CollectedHeap(),
 241   _shenandoah_policy(policy),
 242   _concurrent_mark_in_progress(false),
 243   _evacuation_in_progress(false),
 244   _full_gc_in_progress(false),
 245   _free_regions(NULL),
 246   _collection_set(NULL),
 247   _bytes_allocated_since_cm(0),
 248   _bytes_allocated_during_cm(0),
 249   _max_allocated_gc(0),
 250   _allocated_last_gc(0),
 251   _used_start_gc(0),
 252   _max_conc_workers((int) MAX2((uint) ConcGCThreads, 1U)),
 253   _max_parallel_workers((int) MAX2((uint) ParallelGCThreads, 1U)),
 254   _ref_processor(NULL),
 255   _in_cset_fast_test(NULL),
 256   _in_cset_fast_test_base(NULL),
 257   _top_at_mark_starts(NULL),
 258   _top_at_mark_starts_base(NULL),
 259   _mark_bit_map0(),
 260   _mark_bit_map1(),
 261   _cancelled_concgc(false),
 262   _need_update_refs(false),
 263   _need_reset_bitmaps(false),
 264   _growing_heap(0),
 265   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
 266 
 267 {
 268   log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
 269   log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
 270   log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
 271 
 272   _scm = new ShenandoahConcurrentMark();
 273   _used = 0;
 274   // This is odd.  They are concurrent gc threads, but they are also task threads.
 275   // Framework doesn't allow both.
 276   _workers = new WorkGang("Parallel GC Threads", ParallelGCThreads,
 277                             /* are_GC_task_threads */true,
 278                             /* are_ConcurrentGC_threads */false);
 279   _conc_workers = new WorkGang("Concurrent GC Threads", ConcGCThreads,
 280                             /* are_GC_task_threads */true,
 281                             /* are_ConcurrentGC_threads */false);
 282   if ((_workers == NULL) || (_conc_workers == NULL)) {
 283     vm_exit_during_initialization("Failed necessary allocation.");
 284   } else {
 285     _workers->initialize_workers();
 286     _conc_workers->initialize_workers();
 287   }
 288 }
 289 
 290 class ResetBitmapTask : public AbstractGangTask {
 291 private:
 292   ShenandoahHeapRegionSet* _regions;
 293 
 294 public:
 295   ResetBitmapTask(ShenandoahHeapRegionSet* regions) :
 296     AbstractGangTask("Parallel Reset Bitmap Task"),
 297     _regions(regions) {
 298     _regions->clear_current_index();
 299   }
 300 
 301   void work(uint worker_id) {
 302     ShenandoahHeapRegion* region = _regions->claim_next();
 303     ShenandoahHeap* heap = ShenandoahHeap::heap();
 304     while (region != NULL) {
 305       HeapWord* bottom = region->bottom();
 306       HeapWord* top = region->top_prev_mark_bitmap();
 307       region->set_top_prev_mark_bitmap(region->top_at_prev_mark_start());
 308       if (top > bottom) {
 309         heap->reset_mark_bitmap_range(bottom, top);
 310       }
 311       region = _regions->claim_next();
 312     }
 313   }
 314 };
 315 
 316 void ShenandoahHeap::reset_mark_bitmap() {
 317   GCTraceTime(Info, gc, phases) time("Concurrent reset bitmaps", gc_timer(), GCCause::_no_gc);
 318 
 319   ResetBitmapTask task = ResetBitmapTask(_ordered_regions);
 320   conc_workers()->run_task(&task);
 321 }
 322 
 323 void ShenandoahHeap::reset_mark_bitmap_range(HeapWord* from, HeapWord* to) {
 324   _next_mark_bit_map->clear_range(MemRegion(from, to));
 325 }
 326 
 327 bool ShenandoahHeap::is_bitmap_clear() {
 328   HeapWord* start = _ordered_regions->bottom();
 329   HeapWord* end = _ordered_regions->end();
 330   return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
 331 }
 332 
 333 void ShenandoahHeap::print_on(outputStream* st) const {
 334   st->print("Shenandoah Heap");
 335   st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity()/ K, used() /K);
 336   st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::RegionSizeBytes / K);
 337   if (_concurrent_mark_in_progress) {
 338     st->print("marking ");
 339   }
 340   if (_evacuation_in_progress) {
 341     st->print("evacuating ");
 342   }
 343   if (_cancelled_concgc) {
 344     st->print("cancelled ");
 345   }
 346   st->print("\n");
 347 
 348   if (Verbose) {
 349     print_heap_regions(st);
 350   }
 351 }
 352 
 353 class InitGCLABClosure : public ThreadClosure {
 354 public:
 355   void do_thread(Thread* thread) {
 356     thread->gclab().initialize(true);
 357   }
 358 };
 359 
 360 void ShenandoahHeap::post_initialize() {
 361 
 362   {
 363     if (UseTLAB) {
 364       InitGCLABClosure init_gclabs;
 365       for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
 366         init_gclabs.do_thread(thread);
 367       }
 368       gc_threads_do(&init_gclabs);
 369     }
 370   }
 371   _scm->initialize();
 372 
 373   ref_processing_init();
 374 
 375   _max_workers = MAX(_max_parallel_workers, _max_conc_workers);
 376 }
 377 
 378 class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
 379   size_t sum;
 380 public:
 381 
 382   CalculateUsedRegionClosure() {
 383     sum = 0;
 384   }
 385 
 386   bool doHeapRegion(ShenandoahHeapRegion* r) {
 387     sum = sum + r->used();
 388     return false;
 389   }
 390 
 391   size_t getResult() { return sum;}
 392 };
 393 
 394 size_t ShenandoahHeap::calculateUsed() {
 395   CalculateUsedRegionClosure cl;
 396   heap_region_iterate(&cl);
 397   return cl.getResult();
 398 }
 399 
 400 void ShenandoahHeap::verify_heap_size_consistency() {
 401 
 402   assert(calculateUsed() == used(),
 403          "heap used size must be consistent heap-used: "SIZE_FORMAT" regions-used: "SIZE_FORMAT, used(), calculateUsed());
 404 }
 405 
 406 size_t ShenandoahHeap::used() const {
 407   OrderAccess::acquire();
 408   return _used;
 409 }
 410 
 411 void ShenandoahHeap::increase_used(size_t bytes) {
 412   Atomic::add(bytes, &_used);
 413 }
 414 
 415 void ShenandoahHeap::set_used(size_t bytes) {
 416   _used = bytes;
 417   OrderAccess::release();
 418 }
 419 
 420 void ShenandoahHeap::decrease_used(size_t bytes) {
 421   assert(_used >= bytes, "never decrease heap size by more than we've left");
 422   Atomic::add(-bytes, &_used);
 423 }
 424 
 425 size_t ShenandoahHeap::capacity() const {
 426   return _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
 427 
 428 }
 429 
 430 bool ShenandoahHeap::is_maximal_no_gc() const {
 431   Unimplemented();
 432   return true;
 433 }
 434 
 435 size_t ShenandoahHeap::max_capacity() const {
 436   return _max_regions * ShenandoahHeapRegion::RegionSizeBytes;
 437 }
 438 
 439 size_t ShenandoahHeap::min_capacity() const {
 440   return _initialSize;
 441 }
 442 
 443 VirtualSpace* ShenandoahHeap::storage() const {
 444   return (VirtualSpace*) &_storage;
 445 }
 446 
 447 bool ShenandoahHeap::is_in(const void* p) const {
 448   HeapWord* first_region_bottom = _first_region->bottom();
 449   HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * _num_regions;
 450   return p > _first_region_bottom && p < last_region_end;
 451 }
 452 
 453 bool ShenandoahHeap::is_scavengable(const void* p) {
 454   return true;
 455 }
 456 
 457 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 458   // Retain tlab and allocate object in shared space if
 459   // the amount free in the tlab is too large to discard.
 460   if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
 461     thread->gclab().record_slow_allocation(size);
 462     return NULL;
 463   }
 464 
 465   // Discard gclab and allocate a new one.
 466   // To minimize fragmentation, the last GCLAB may be smaller than the rest.
 467   size_t new_gclab_size = thread->gclab().compute_size(size);
 468 
 469   thread->gclab().clear_before_allocation();
 470 
 471   if (new_gclab_size == 0) {
 472     return NULL;
 473   }
 474 
 475   // Allocate a new GCLAB...
 476   HeapWord* obj = allocate_new_gclab(new_gclab_size);
 477   if (obj == NULL) {
 478     return NULL;
 479   }
 480 
 481   if (ZeroTLAB) {
 482     // ..and clear it.
 483     Copy::zero_to_words(obj, new_gclab_size);
 484   } else {
 485     // ...and zap just allocated object.
 486 #ifdef ASSERT
 487     // Skip mangling the space corresponding to the object header to
 488     // ensure that the returned space is not considered parsable by
 489     // any concurrent GC thread.
 490     size_t hdr_size = oopDesc::header_size();
 491     Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
 492 #endif // ASSERT
 493   }
 494   thread->gclab().fill(obj, obj + size, new_gclab_size);
 495   return obj;
 496 }
 497 
 498 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
 499   return allocate_new_tlab(word_size, false);
 500 }
 501 
 502 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
 503   return allocate_new_tlab(word_size, true);
 504 }
 505 
 506 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool evacuating) {
 507   HeapWord* result = allocate_memory(word_size, evacuating);
 508 
 509   if (result != NULL) {
 510     assert(! heap_region_containing(result)->is_in_collection_set(), "Never allocate in dirty region");
 511     _bytes_allocated_since_cm += word_size * HeapWordSize;
 512 
 513     log_develop_trace(gc, tlab)("allocating new tlab of size "SIZE_FORMAT" at addr "PTR_FORMAT, word_size, p2i(result));
 514 
 515   }
 516   return result;
 517 }
 518 
 519 ShenandoahHeap* ShenandoahHeap::heap() {
 520   CollectedHeap* heap = Universe::heap();
 521   assert(heap != NULL, "Unitialized access to ShenandoahHeap::heap()");
 522   assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
 523   return (ShenandoahHeap*) heap;
 524 }
 525 
 526 ShenandoahHeap* ShenandoahHeap::heap_no_check() {
 527   CollectedHeap* heap = Universe::heap();
 528   return (ShenandoahHeap*) heap;
 529 }
 530 
 531 HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, bool evacuating) {
 532   HeapWord* result = NULL;
 533   result = allocate_memory_work(word_size);
 534 
 535   if (result == NULL) {
 536     bool retry;
 537     do {
 538       // Try to grow the heap.
 539       retry = check_grow_heap();
 540       result = allocate_memory_work(word_size);
 541     } while (retry && result == NULL);
 542   }
 543 
 544   if (result == NULL && ! evacuating) { // Allocation failed, try full-GC, then retry allocation.
 545     // tty->print_cr("failed to allocate "SIZE_FORMAT " bytes, free regions:", word_size * HeapWordSize);
 546     // _free_regions->print();
 547     collect(GCCause::_allocation_failure);
 548     result = allocate_memory_work(word_size);
 549   }
 550 
 551   // Only update monitoring counters when not calling from a write-barrier.
 552   // Otherwise we might attempt to grab the Service_lock, which we must
 553   // not do when coming from a write-barrier (because the thread might
 554   // already hold the Compile_lock).
 555   if (! evacuating) {
 556     monitoring_support()->update_counters();
 557   }
 558 
 559   log_develop_trace(gc, alloc)("allocate memory chunk of size "SIZE_FORMAT" at addr "PTR_FORMAT " by thread %d ", word_size, p2i(result), Thread::current()->osthread()->thread_id());
 560 
 561   return result;
 562 }
 563 
 564 bool ShenandoahHeap::call_from_write_barrier(bool evacuating) {
 565   return evacuating && Thread::current()->is_Java_thread();
 566 }
 567 
 568 bool ShenandoahHeap::check_grow_heap() {
 569 
 570   assert(_free_regions->max_regions() >= _free_regions->active_regions(), "don't get negative");
 571 
 572   size_t available = _max_regions - _num_regions;
 573   if (available == 0) {
 574     return false; // Don't retry.
 575   }
 576 
 577   jbyte growing = Atomic::cmpxchg(1, &_growing_heap, 0);
 578   if (growing == 0) {
 579     // Only one thread succeeds this, and this one gets
 580     // to grow the heap. All other threads can continue
 581     // to allocate from the reserve.
 582     grow_heap_by(MIN2(available, ShenandoahAllocReserveRegions));
 583 
 584     // Reset it back to 0, so that other threads can take it again.
 585     Atomic::store(0, &_growing_heap);
 586     return true;
 587   } else {
 588     // Let other threads work, then try again.
 589     os::naked_yield();
 590     return true;
 591   }
 592 }
 593 
 594 HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
 595   if (word_size * HeapWordSize > ShenandoahHeapRegion::RegionSizeBytes) {
 596     return allocate_large_memory(word_size);
 597   }
 598 
 599   // Not enough memory in free region set.
 600   // Coming out of full GC, it is possible that there is not
 601   // free region available, so current_index may not be valid.
 602   if (word_size * HeapWordSize > _free_regions->capacity()) return NULL;
 603 
 604   jlong current_idx = _free_regions->current_index();
 605   assert(current_idx >= 0, "expect >= 0");
 606 
 607   ShenandoahHeapRegion* my_current_region = _free_regions->get(current_idx);
 608 
 609   if (my_current_region == NULL) {
 610     return NULL; // No more room to make a new region. OOM.
 611   }
 612   assert(my_current_region != NULL, "should have a region at this point");
 613 
 614 #ifdef ASSERT
 615   if (my_current_region->is_in_collection_set()) {
 616     print_heap_regions();
 617   }
 618 #endif
 619   assert(! my_current_region->is_in_collection_set(), "never get targetted regions in free-lists");
 620   assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
 621 
 622   HeapWord* result = my_current_region->par_allocate(word_size);
 623 
 624   while (result == NULL && my_current_region != NULL) {
 625 
 626     // 2nd attempt. Try next region.
 627     size_t remaining = my_current_region->free();
 628     current_idx = _free_regions->par_claim_next(current_idx);
 629     my_current_region = _free_regions->get(current_idx);
 630 
 631     if (my_current_region == NULL) {
 632       // tty->print("WTF: OOM error trying to allocate %ld words\n", word_size);
 633       return NULL; // No more room to make a new region. OOM.
 634     }
 635     // _free_regions->increase_used(remaining);
 636     assert(my_current_region != NULL, "should have a region at this point");
 637     assert(! my_current_region->is_in_collection_set(), "never get targetted regions in free-lists");
 638     assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
 639     result = my_current_region->par_allocate(word_size);
 640   }
 641 
 642   if (result != NULL) {
 643     my_current_region->increase_live_data(word_size * HeapWordSize);
 644     increase_used(word_size * HeapWordSize);
 645     _free_regions->increase_used(word_size * HeapWordSize);
 646   }
 647   return result;
 648 }
 649 
 650 HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
 651 
 652   uint required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
 653   if (required_regions > _max_regions) return NULL;
 654 
 655   ShenandoahHeapRegion* r = _free_regions->claim_contiguous(required_regions);
 656 
 657   HeapWord* result = NULL;
 658 
 659   if (r != NULL)  {
 660     result = r->bottom();
 661 
 662     log_debug(gc, humongous)("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" in start region "SIZE_FORMAT,
 663                              (words * HeapWordSize) / K, p2i(result), r->region_number());
 664   } else {
 665     log_debug(gc, humongous)("allocating humongous object of size: "SIZE_FORMAT" KB at location "PTR_FORMAT" failed",
 666                              (words * HeapWordSize) / K, p2i(result));
 667   }
 668 
 669 
 670   return result;
 671 
 672 }
 673 
 674 HeapWord*  ShenandoahHeap::mem_allocate(size_t size,
 675                                         bool*  gc_overhead_limit_was_exceeded) {
 676 
 677 #ifdef ASSERT
 678   if (ShenandoahVerify && _numAllocs > 1000000) {
 679     _numAllocs = 0;
 680   }
 681   _numAllocs++;
 682 #endif
 683   HeapWord* filler = allocate_memory(BrooksPointer::word_size() + size, false);
 684   HeapWord* result = filler + BrooksPointer::word_size();
 685   if (filler != NULL) {
 686     BrooksPointer::initialize(oop(result));
 687     _bytes_allocated_since_cm += size * HeapWordSize;
 688 
 689     assert(! heap_region_containing(result)->is_in_collection_set(), "never allocate in targetted region");
 690     return result;
 691   } else {
 692     /*
 693     tty->print_cr("Out of memory. Requested number of words: "SIZE_FORMAT" used heap: "INT64_FORMAT", bytes allocated since last CM: "INT64_FORMAT, size, used(), _bytes_allocated_since_cm);
 694     {
 695       print_heap_regions();
 696       tty->print("Printing "SIZE_FORMAT" free regions:\n", _free_regions->count());
 697       _free_regions->print();
 698     }
 699     */
 700     return NULL;
 701   }
 702 }
 703 
 704 class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
 705 private:
 706   ShenandoahHeap* _heap;
 707   Thread* _thread;
 708   public:
 709   ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 710     _heap(heap), _thread(Thread::current()) {
 711   }
 712 
 713   void do_object(oop p) {
 714 
 715     log_develop_trace(gc, compaction)("Calling ParallelEvacuateRegionObjectClosure on "PTR_FORMAT" of size %d\n", p2i((HeapWord*) p), p->size());
 716 
 717     assert(_heap->is_marked_prev(p), "expect only marked objects");
 718     if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
 719       _heap->evacuate_object(p, _thread);
 720     }
 721   }
 722 };
 723 
 724 #ifdef ASSERT
 725 class VerifyEvacuatedObjectClosure : public ObjectClosure {
 726 
 727 public:
 728 
 729   void do_object(oop p) {
 730     if (ShenandoahHeap::heap()->is_marked_prev(p)) {
 731       oop p_prime = oopDesc::bs()->read_barrier(p);
 732       assert(! oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
 733       if (p->klass() != p_prime->klass()) {
 734         tty->print_cr("copy has different class than original:");
 735         p->klass()->print_on(tty);
 736         p_prime->klass()->print_on(tty);
 737       }
 738       assert(p->klass() == p_prime->klass(), "Should have the same class p: "PTR_FORMAT", p_prime: "PTR_FORMAT, p2i((HeapWord*) p), p2i((HeapWord*) p_prime));
 739       //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
 740       assert(p->size() == p_prime->size(), "Should be the same size");
 741       assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "One forward once");
 742     }
 743   }
 744 };
 745 
 746 void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
 747   VerifyEvacuatedObjectClosure verify_evacuation;
 748   from_region->marked_object_iterate(&verify_evacuation);
 749 }
 750 #endif
 751 
 752 void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
 753 
 754   assert(from_region->getLiveData() > 0, "all-garbage regions are reclaimed earlier");
 755 
 756   ParallelEvacuateRegionObjectClosure evacuate_region(this);
 757 
 758   marked_object_iterate(from_region, &evacuate_region);
 759 
 760 #ifdef ASSERT
 761   if (ShenandoahVerify && ! cancelled_concgc()) {
 762     verify_evacuated_region(from_region);
 763   }
 764 #endif
 765 }
 766 
 767 class ParallelEvacuationTask : public AbstractGangTask {
 768 private:
 769   ShenandoahHeap* _sh;
 770   ShenandoahCollectionSet* _cs;
 771 
 772 public:
 773   ParallelEvacuationTask(ShenandoahHeap* sh,
 774                          ShenandoahCollectionSet* cs) :
 775     AbstractGangTask("Parallel Evacuation Task"),
 776     _cs(cs),
 777     _sh(sh) {}
 778 
 779   void work(uint worker_id) {
 780 
 781     ShenandoahHeapRegion* from_hr = _cs->claim_next();
 782 
 783     while (from_hr != NULL) {
 784       log_develop_trace(gc, region)("Thread "INT32_FORMAT" claimed Heap Region "SIZE_FORMAT,
 785                                     worker_id,
 786                                     from_hr->region_number());
 787 
 788       assert(from_hr->getLiveData() > 0, "all-garbage regions are reclaimed early");
 789       _sh->parallel_evacuate_region(from_hr);
 790 
 791       if (_sh->cancelled_concgc()) {
 792         // tty->print("We cancelled concgc while working on region %d\n", from_hr->region_number());
 793         // from_hr->print();
 794         break;
 795       }
 796       from_hr = _cs->claim_next();
 797     }
 798   }
 799 };
 800 
 801 class RecycleDirtyRegionsClosure: public ShenandoahHeapRegionClosure {
 802 private:
 803   ShenandoahHeap* _heap;
 804   size_t _bytes_reclaimed;
 805 public:
 806   RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()) {}
 807 
 808   bool doHeapRegion(ShenandoahHeapRegion* r) {
 809 
 810     if (_heap->cancelled_concgc()) {
 811       // The aborted marking bitmap needs to be cleared at the end of cycle.
 812       // Setup the top-marker for this.
 813       r->set_top_prev_mark_bitmap(r->top_at_mark_start());
 814 
 815       return false;
 816     }
 817 
 818     r->swap_top_at_mark_start();
 819 
 820     if (r->is_in_collection_set()) {
 821       //      tty->print_cr("recycling region "INT32_FORMAT":", r->region_number());
 822       //      r->print_on(tty);
 823       //      tty->print_cr(" ");
 824       _heap->decrease_used(r->used());
 825       _bytes_reclaimed += r->used();
 826       r->recycle();
 827       _heap->free_regions()->add_region(r);
 828     }
 829 
 830     return false;
 831   }
 832   size_t bytes_reclaimed() { return _bytes_reclaimed;}
 833   void clear_bytes_reclaimed() {_bytes_reclaimed = 0;}
 834 };
 835 
 836 void ShenandoahHeap::recycle_dirty_regions() {
 837   RecycleDirtyRegionsClosure cl;
 838   cl.clear_bytes_reclaimed();
 839 
 840   heap_region_iterate(&cl);
 841 
 842   _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
 843   if (! cancelled_concgc()) {
 844     clear_cset_fast_test();
 845   }
 846 }
 847 
 848 ShenandoahFreeSet* ShenandoahHeap::free_regions() {
 849   return _free_regions;
 850 }
 851 
 852 void ShenandoahHeap::print_heap_regions(outputStream* st) const {
 853   _ordered_regions->print(st);
 854 }
 855 
 856 class PrintAllRefsOopClosure: public ExtendedOopClosure {
 857 private:
 858   int _index;
 859   const char* _prefix;
 860 
 861 public:
 862   PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}
 863 
 864 private:
 865   template <class T>
 866   inline void do_oop_work(T* p) {
 867     oop o = oopDesc::load_decode_heap_oop(p);
 868     if (o != NULL) {
 869       if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
 870         tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT")-> "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT")", _prefix, _index, p2i(p), p2i((HeapWord*) o), BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(o)), o->klass()->internal_name(), p2i(o->klass()));
 871       } else {
 872         //        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT" dirty: %s) -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty (%s))", _prefix, _index, p2i(p), BOOL_TO_STR(ShenandoahHeap::heap()->heap_region_containing(p)->is_in_collection_set()), p2i((HeapWord*) o), BOOL_TO_STR(ShenandoahHeap::heap()->heap_region_containing(o)->is_in_collection_set()));
 873         tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT" dirty -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty)", _prefix, _index, p2i(p), p2i((HeapWord*) o));
 874       }
 875     } else {
 876       tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
 877     }
 878     _index++;
 879   }
 880 
 881 public:
 882   void do_oop(oop* p) {
 883     do_oop_work(p);
 884   }
 885 
 886   void do_oop(narrowOop* p) {
 887     do_oop_work(p);
 888   }
 889 
 890 };
 891 
 892 class PrintAllRefsObjectClosure : public ObjectClosure {
 893   const char* _prefix;
 894 
 895 public:
 896   PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}
 897 
 898   void do_object(oop p) {
 899     if (ShenandoahHeap::heap()->is_in(p)) {
 900         tty->print_cr("%s object "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT") refers to:", _prefix, p2i((HeapWord*) p), BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(p)), p->klass()->internal_name(), p2i(p->klass()));
 901         PrintAllRefsOopClosure cl(_prefix);
 902         p->oop_iterate(&cl);
 903       }
 904   }
 905 };
 906 
 907 void ShenandoahHeap::print_all_refs(const char* prefix) {
 908   tty->print_cr("printing all references in the heap");
 909   tty->print_cr("root references:");
 910 
 911   ensure_parsability(false);
 912 
 913   PrintAllRefsOopClosure cl(prefix);
 914   roots_iterate(&cl);
 915 
 916   tty->print_cr("heap references:");
 917   PrintAllRefsObjectClosure cl2(prefix);
 918   object_iterate(&cl2);
 919 }
 920 
 921 class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
 922 private:
 923   ShenandoahHeap*  _heap;
 924 
 925 public:
 926   VerifyAfterMarkingOopClosure() :
 927     _heap(ShenandoahHeap::heap()) { }
 928 
 929 private:
 930   template <class T>
 931   inline void do_oop_work(T* p) {
 932     oop o = oopDesc::load_decode_heap_oop(p);
 933     if (o != NULL) {
 934       if (! _heap->is_marked_prev(o)) {
 935         _heap->print_heap_regions();
 936         _heap->print_all_refs("post-mark");
 937         tty->print_cr("oop not marked, although referrer is marked: "PTR_FORMAT": in_heap: %s, is_marked: %s",
 938                       p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_prev(o)));
 939         _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());
 940 
 941         tty->print_cr("oop class: %s", o->klass()->internal_name());
 942         if (_heap->is_in(p)) {
 943           oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
 944           tty->print_cr("Referrer starts at addr "PTR_FORMAT, p2i((HeapWord*) referrer));
 945           referrer->print();
 946           _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
 947         }
 948         tty->print_cr("heap region containing object:");
 949         _heap->heap_region_containing(o)->print();
 950         tty->print_cr("heap region containing referrer:");
 951         _heap->heap_region_containing(p)->print();
 952         tty->print_cr("heap region containing forwardee:");
 953         _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
 954       }
 955       assert(o->is_oop(), "oop must be an oop");
 956       assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
 957       if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
 958         tty->print_cr("oops has forwardee: p: "PTR_FORMAT" (%s), o = "PTR_FORMAT" (%s), new-o: "PTR_FORMAT" (%s)", p2i(p), BOOL_TO_STR(_heap->heap_region_containing(p)->is_in_collection_set()), p2i((HeapWord*) o),  BOOL_TO_STR(_heap->heap_region_containing(o)->is_in_collection_set()), p2i((HeapWord*) oopDesc::bs()->read_barrier(o)), BOOL_TO_STR(_heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->is_in_collection_set()));
 959         tty->print_cr("oop class: %s", o->klass()->internal_name());
 960       }
 961       assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
 962       assert(! _heap->heap_region_containing(o)->is_in_collection_set(), "references must not point to dirty heap regions");
 963       assert(_heap->is_marked_prev(o), "live oops must be marked current");
 964     }
 965   }
 966 
 967 public:
 968   void do_oop(oop* p) {
 969     do_oop_work(p);
 970   }
 971 
 972   void do_oop(narrowOop* p) {
 973     do_oop_work(p);
 974   }
 975 
 976 };
 977 
 978 class IterateMarkedCurrentObjectsClosure: public ObjectClosure {
 979 private:
 980   ShenandoahHeap* _heap;
 981   ExtendedOopClosure* _cl;
 982 public:
 983   IterateMarkedCurrentObjectsClosure(ExtendedOopClosure* cl) :
 984     _heap(ShenandoahHeap::heap()), _cl(cl) {};
 985 
 986   void do_object(oop p) {
 987     if (_heap->is_marked_current(p)) {
 988       p->oop_iterate(_cl);
 989     }
 990   }
 991 
 992 };
 993 
 994 void ShenandoahHeap::verify_heap_after_marking() {
 995 
 996   verify_heap_size_consistency();
 997 
 998   log_trace(gc)("verifying heap after marking");
 999 
1000   VerifyAfterMarkingOopClosure cl;
1001   roots_iterate(&cl);
1002 
1003   IterateMarkedCurrentObjectsClosure marked_oops(&cl);
1004   object_iterate(&marked_oops);
1005 }
1006 
1007 
1008 void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
1009   assert(r->is_humongous_start(), "reclaim regions starting with the first one");
1010 
1011   oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
1012   size_t size = humongous_obj->size() + BrooksPointer::word_size();
1013   uint required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
1014   uint index = r->region_number();
1015 
1016 
1017   assert(r->getLiveData() == 0, "liveness must be zero");
1018 
1019   for(size_t i = 0; i < required_regions; i++) {
1020 
1021     ShenandoahHeapRegion* region = _ordered_regions->get(index++);
1022 
1023     assert((region->is_humongous_start() || region->is_humongous_continuation()),
1024            "expect correct humongous start or continuation");
1025 
1026     if (log_is_enabled(Debug, gc, humongous)) {
1027       log_debug(gc, humongous)("reclaiming "UINT32_FORMAT" humongous regions for object of size: "SIZE_FORMAT" words", required_regions, size);
1028       ResourceMark rm;
1029       outputStream* out = Log(gc, humongous)::debug_stream();
1030       region->print_on(out);
1031     }
1032 
1033     region->reset();
1034     ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::RegionSizeBytes);
1035   }
1036 }
1037 
1038 class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {
1039 
1040   bool doHeapRegion(ShenandoahHeapRegion* r) {
1041     ShenandoahHeap* heap = ShenandoahHeap::heap();
1042 
1043     if (r->is_humongous_start()) {
1044       oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
1045       if (! heap->is_marked_current(humongous_obj)) {
1046 
1047         heap->reclaim_humongous_region_at(r);
1048       }
1049     }
1050     return false;
1051   }
1052 };
1053 
1054 #ifdef ASSERT
1055 class CheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
1056   bool doHeapRegion(ShenandoahHeapRegion* r) {
1057     assert(!r->is_in_collection_set(), "Should have been cleared by now");
1058     return false;
1059   }
1060 };
1061 #endif
1062 
1063 void ShenandoahHeap::prepare_for_concurrent_evacuation() {
1064   assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF");
1065   /*
1066   tty->print("Thread %d started prepare_for_concurrent_evacuation\n",
1067              Thread::current()->osthread()->thread_id());
1068   */
1069   if (!cancelled_concgc()) {
1070 
1071     recycle_dirty_regions();
1072 
1073     ensure_parsability(true);
1074 
1075 #ifdef ASSERT
1076     if (ShenandoahVerify) {
1077       verify_heap_after_marking();
1078     }
1079 #endif
1080 
1081     // NOTE: This needs to be done during a stop the world pause, because
1082     // putting regions into the collection set concurrently with Java threads
1083     // will create a race. In particular, acmp could fail because when we
1084     // resolve the first operand, the containing region might not yet be in
1085     // the collection set, and thus return the original oop. When the 2nd
1086     // operand gets resolved, the region could be in the collection set
1087     // and the oop gets evacuated. If both operands have originally been
1088     // the same, we get false negatives.
1089 
1090 
1091     _collection_set->clear();
1092     _free_regions->clear();
1093 
1094     ShenandoahReclaimHumongousRegionsClosure reclaim;
1095     heap_region_iterate(&reclaim);
1096 
1097     // _ordered_regions->print();
1098 #ifdef ASSERT
1099     CheckCollectionSetClosure ccsc;
1100     _ordered_regions->heap_region_iterate(&ccsc);
1101 #endif
1102 
1103     _shenandoah_policy->choose_collection_set(_collection_set);
1104 
1105     _shenandoah_policy->choose_free_set(_free_regions);
1106 
1107     /*
1108     tty->print("Sorted free regions\n");
1109     _free_regions->print();
1110     */
1111 
1112     if (_collection_set->count() == 0)
1113       cancel_concgc();
1114 
1115     _bytes_allocated_since_cm = 0;
1116 
1117     Universe::update_heap_info_at_gc();
1118   }
1119 }
1120 
1121 
1122 class RetireTLABClosure : public ThreadClosure {
1123 private:
1124   bool _retire;
1125 
1126 public:
1127   RetireTLABClosure(bool retire) : _retire(retire) {
1128   }
1129 
1130   void do_thread(Thread* thread) {
1131     thread->gclab().make_parsable(_retire);
1132   }
1133 };
1134 
1135 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1136   if (UseTLAB) {
1137     CollectedHeap::ensure_parsability(retire_tlabs);
1138 
1139   RetireTLABClosure cl(retire_tlabs);
1140   for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
1141     cl.do_thread(thread);
1142   }
1143   gc_threads_do(&cl);
1144   }
1145 }
1146 
1147 class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
1148 private:
1149   ShenandoahHeap* _heap;
1150   Thread* _thread;
1151 public:
1152   ShenandoahEvacuateUpdateRootsClosure() :
1153     _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
1154   }
1155 
1156 private:
1157   template <class T>
1158   void do_oop_work(T* p) {
1159     assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
1160 
1161     T o = oopDesc::load_heap_oop(p);
1162     if (! oopDesc::is_null(o)) {
1163       oop obj = oopDesc::decode_heap_oop_not_null(o);
1164       if (_heap->in_cset_fast_test((HeapWord*) obj)) {
1165         assert(_heap->is_marked_prev(obj), "only evacuate marked objects %d %d", _heap->is_marked_prev(obj), _heap->is_marked_prev(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
1166         oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
1167         if (oopDesc::unsafe_equals(resolved, obj)) {
1168           resolved = _heap->evacuate_object(obj, _thread);
1169         }
1170         oopDesc::encode_store_heap_oop(p, resolved);
1171       }
1172     }
1173 #ifdef ASSERT
1174     else {
1175       // tty->print_cr("not updating root at: "PTR_FORMAT" with object: "PTR_FORMAT", is_in_heap: %s, is_in_cset: %s, is_marked: %s", p2i(p), p2i((HeapWord*) obj), BOOL_TO_STR(_heap->is_in(obj)), BOOL_TO_STR(_heap->in_cset_fast_test(obj)), BOOL_TO_STR(_heap->is_marked_current(obj)));
1176     }
1177 #endif
1178   }
1179 
1180 public:
1181   void do_oop(oop* p) {
1182     do_oop_work(p);
1183   }
1184   void do_oop(narrowOop* p) {
1185     do_oop_work(p);
1186   }
1187 };
1188 
1189 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1190   ShenandoahRootProcessor* _rp;
1191 public:
1192 
1193   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootProcessor* rp) :
1194     AbstractGangTask("Shenandoah evacuate and update roots"),
1195     _rp(rp)
1196   {
1197     // Nothing else to do.
1198   }
1199 
1200   void work(uint worker_id) {
1201     ShenandoahEvacuateUpdateRootsClosure cl;
1202     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1203 
1204     _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1205   }
1206 };
1207 
1208 void ShenandoahHeap::evacuate_and_update_roots() {
1209 
1210   COMPILER2_PRESENT(DerivedPointerTable::clear());
1211 
1212   if (ShenandoahVerifyReadsToFromSpace) {
1213     set_from_region_protection(false);
1214   }
1215 
1216   assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
1217   ClassLoaderDataGraph::clear_claimed_marks();
1218 
1219   {
1220     ShenandoahRootProcessor rp(this, _max_parallel_workers, ShenandoahCollectorPolicy::evac_thread_roots);
1221     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1222     workers()->run_task(&roots_task);
1223   }
1224 
1225   if (ShenandoahVerifyReadsToFromSpace) {
1226     set_from_region_protection(true);
1227   }
1228 
1229   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1230 
1231 }
1232 
1233 
1234 void ShenandoahHeap::do_evacuation() {
1235 
1236   parallel_evacuate();
1237 
1238   if (ShenandoahVerify && ! cancelled_concgc()) {
1239     VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
1240     if (Thread::current()->is_VM_thread()) {
1241       verify_after_evacuation.doit();
1242     } else {
1243       VMThread::execute(&verify_after_evacuation);
1244     }
1245   }
1246 
1247 }
1248 
1249 void ShenandoahHeap::parallel_evacuate() {
1250 
1251   if (! cancelled_concgc()) {
1252 
1253     log_develop_trace(gc)("starting parallel_evacuate");
1254 
1255     _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);
1256 
1257     if (log_is_enabled(Trace, gc, region)) {
1258       ResourceMark rm;
1259       outputStream* out = Log(gc, region)::trace_stream();
1260       out->print("Printing all available regions");
1261       print_heap_regions(out);
1262     }
1263 
1264     if (log_is_enabled(Trace, gc, cset)) {
1265       ResourceMark rm;
1266       outputStream* out = Log(gc, cset)::trace_stream();
1267       out->print("Printing collection set which contains "SIZE_FORMAT" regions:\n", _collection_set->count());
1268       _collection_set->print(out);
1269 
1270       out->print("Printing free set which contains "SIZE_FORMAT" regions:\n", _free_regions->count());
1271       _free_regions->print(out);
1272     }
1273 
1274     ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);
1275 
1276     conc_workers()->run_task(&evacuationTask);
1277 
1278     if (log_is_enabled(Trace, gc, cset)) {
1279       ResourceMark rm;
1280       outputStream* out = Log(gc, cset)::trace_stream();
1281       out->print("Printing postgc collection set which contains "SIZE_FORMAT" regions:\n",
1282                  _collection_set->count());
1283 
1284       _collection_set->print(out);
1285 
1286       out->print("Printing postgc free regions which contain "SIZE_FORMAT" free regions:\n",
1287                  _free_regions->count());
1288       _free_regions->print(out);
1289 
1290     }
1291 
1292     if (log_is_enabled(Trace, gc, region)) {
1293       ResourceMark rm;
1294       outputStream* out = Log(gc, region)::trace_stream();
1295       out->print_cr("all regions after evacuation:");
1296       print_heap_regions(out);
1297     }
1298 
1299     _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);
1300 
1301     if (cancelled_concgc()) {
1302       // tty->print("GOTCHA: by thread %d", Thread::current()->osthread()->thread_id());
1303       concurrent_thread()->schedule_full_gc();
1304       // tty->print("PostGotcha: by thread %d FullGC should be scheduled\n",
1305       //            Thread::current()->osthread()->thread_id());
1306     }
1307   }
1308 }
1309 
1310 class VerifyEvacuationClosure: public ExtendedOopClosure {
1311 private:
1312   ShenandoahHeap*  _heap;
1313   ShenandoahHeapRegion* _from_region;
1314 
1315 public:
1316   VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
1317     _heap(ShenandoahHeap::heap()), _from_region(from_region) { }
1318 private:
1319   template <class T>
1320   inline void do_oop_work(T* p) {
1321     oop heap_oop = oopDesc::load_decode_heap_oop(p);
1322     if (! oopDesc::is_null(heap_oop)) {
1323       guarantee(! _from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: "PTR_FORMAT, p2i((HeapWord*) heap_oop));
1324     }
1325   }
1326 
1327 public:
1328   void do_oop(oop* p)       {
1329     do_oop_work(p);
1330   }
1331 
1332   void do_oop(narrowOop* p) {
1333     do_oop_work(p);
1334   }
1335 
1336 };
1337 
1338 void ShenandoahHeap::roots_iterate(OopClosure* cl) {
1339 
1340   assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
1341 
1342   CodeBlobToOopClosure blobsCl(cl, false);
1343   CLDToOopClosure cldCl(cl);
1344 
1345   ClassLoaderDataGraph::clear_claimed_marks();
1346 
1347   ShenandoahRootProcessor rp(this, 1);
1348   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
1349 }
1350 
1351 void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
1352 
1353   VerifyEvacuationClosure rootsCl(from_region);
1354   roots_iterate(&rootsCl);
1355 
1356 }
1357 
1358 bool ShenandoahHeap::supports_tlab_allocation() const {
1359   return true;
1360 }
1361 
1362 
1363 size_t  ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1364   jlong idx = _free_regions->current_index();
1365   ShenandoahHeapRegion* current = _free_regions->get(idx);
1366   if (current == NULL)
1367     return 0;
1368   else if (current->free() > MinTLABSize) {
1369     return current->free();
1370   } else {
1371     return MinTLABSize;
1372   }
1373 }
1374 
1375 size_t ShenandoahHeap::max_tlab_size() const {
1376   return ShenandoahHeapRegion::RegionSizeBytes;
1377 }
1378 
1379 class ResizeGCLABClosure : public ThreadClosure {
1380 public:
1381   void do_thread(Thread* thread) {
1382     thread->gclab().resize();
1383   }
1384 };
1385 
1386 void ShenandoahHeap::resize_all_tlabs() {
1387   CollectedHeap::resize_all_tlabs();
1388 
1389   ResizeGCLABClosure cl;
1390   for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
1391     cl.do_thread(thread);
1392   }
1393   gc_threads_do(&cl);
1394 
1395 }
1396 
1397 class AccumulateStatisticsGCLABClosure : public ThreadClosure {
1398 public:
1399   void do_thread(Thread* thread) {
1400     thread->gclab().accumulate_statistics();
1401     thread->gclab().initialize_statistics();
1402   }
1403 };
1404 
1405 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1406 
1407   AccumulateStatisticsGCLABClosure cl;
1408   for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
1409     cl.do_thread(thread);
1410   }
1411   gc_threads_do(&cl);
1412 }
1413 
1414 bool  ShenandoahHeap::can_elide_tlab_store_barriers() const {
1415   return true;
1416 }
1417 
1418 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1419   // Overridden to do nothing.
1420   return new_obj;
1421 }
1422 
1423 bool  ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1424   return true;
1425 }
1426 
1427 bool ShenandoahHeap::card_mark_must_follow_store() const {
1428   return false;
1429 }
1430 
1431 void ShenandoahHeap::collect(GCCause::Cause cause) {
1432   assert(cause != GCCause::_gc_locker, "no JNI critical callback");
1433   if (GCCause::is_user_requested_gc(cause)) {
1434     if (! DisableExplicitGC) {
1435       cancel_concgc();
1436       _concurrent_gc_thread->do_full_gc(cause);
1437     }
1438   } else if (cause == GCCause::_allocation_failure) {
1439 
1440     cancel_concgc();
1441     collector_policy()->set_should_clear_all_soft_refs(true);
1442       _concurrent_gc_thread->do_full_gc(cause);
1443 
1444   }
1445 }
1446 
1447 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1448   //assert(false, "Shouldn't need to do full collections");
1449 }
1450 
1451 AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
1452   Unimplemented();
1453   return NULL;
1454 
1455 }
1456 
1457 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1458   return _shenandoah_policy;
1459 }
1460 
1461 
1462 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1463   Space* sp = heap_region_containing(addr);
1464   if (sp != NULL) {
1465     return sp->block_start(addr);
1466   }
1467   return NULL;
1468 }
1469 
1470 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1471   Space* sp = heap_region_containing(addr);
1472   assert(sp != NULL, "block_size of address outside of heap");
1473   return sp->block_size(addr);
1474 }
1475 
1476 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1477   Space* sp = heap_region_containing(addr);
1478   return sp->block_is_obj(addr);
1479 }
1480 
1481 jlong ShenandoahHeap::millis_since_last_gc() {
1482   return 0;
1483 }
1484 
1485 void ShenandoahHeap::prepare_for_verify() {
1486   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1487     ensure_parsability(false);
1488   }
1489 }
1490 
1491 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1492   workers()->print_worker_threads_on(st);
1493   conc_workers()->print_worker_threads_on(st);
1494 }
1495 
1496 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1497   workers()->threads_do(tcl);
1498   conc_workers()->threads_do(tcl);
1499 }
1500 
1501 void ShenandoahHeap::print_tracing_info() const {
1502   if (log_is_enabled(Info, gc, stats)) {
1503     ResourceMark rm;
1504     outputStream* out = Log(gc, stats)::info_stream();
1505     _shenandoah_policy->print_tracing_info(out);
1506   }
1507 }
1508 
1509 class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
1510 private:
1511   ShenandoahHeap*  _heap;
1512   VerifyOption     _vo;
1513   bool             _failures;
1514 public:
1515   // _vo == UsePrevMarking -> use "prev" marking information,
1516   // _vo == UseNextMarking -> use "next" marking information,
1517   // _vo == UseMarkWord    -> use mark word from object header.
1518   ShenandoahVerifyRootsClosure(VerifyOption vo) :
1519     _heap(ShenandoahHeap::heap()),
1520     _vo(vo),
1521     _failures(false) { }
1522 
1523   bool failures() { return _failures; }
1524 
1525 private:
1526   template <class T>
1527   inline void do_oop_work(T* p) {
1528     oop obj = oopDesc::load_decode_heap_oop(p);
1529     if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
1530       { // Just for debugging.
1531         tty->print_cr("Root location "PTR_FORMAT
1532                       "verified "PTR_FORMAT, p2i(p), p2i((void*) obj));
1533         //      obj->print_on(tty);
1534       }
1535     }
1536     guarantee(obj->is_oop_or_null(), "is oop or null");
1537   }
1538 
1539 public:
1540   void do_oop(oop* p)       {
1541     do_oop_work(p);
1542   }
1543 
1544   void do_oop(narrowOop* p) {
1545     do_oop_work(p);
1546   }
1547 
1548 };
1549 
1550 class ShenandoahVerifyHeapClosure: public ObjectClosure {
1551 private:
1552   ShenandoahVerifyRootsClosure _rootsCl;
1553 public:
1554   ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
1555     _rootsCl(rc) {};
1556 
1557   void do_object(oop p) {
1558     _rootsCl.do_oop(&p);
1559   }
1560 };
1561 
1562 class ShenandoahVerifyKlassClosure: public KlassClosure {
1563   OopClosure *_oop_closure;
1564  public:
1565   ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {}
1566   void do_klass(Klass* k) {
1567     k->oops_do(_oop_closure);
1568   }
1569 };
1570 
1571 void ShenandoahHeap::verify(VerifyOption vo) {
1572   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1573 
1574     ShenandoahVerifyRootsClosure rootsCl(vo);
1575 
1576     assert(Thread::current()->is_VM_thread(),
1577            "Expected to be executed serially by the VM thread at this point");
1578 
1579     roots_iterate(&rootsCl);
1580 
1581     bool failures = rootsCl.failures();
1582     log_trace(gc)("verify failures: %s", BOOL_TO_STR(failures));
1583 
1584     ShenandoahVerifyHeapClosure heapCl(rootsCl);
1585 
1586     object_iterate(&heapCl);
1587     // TODO: Implement rest of it.
1588 #ifdef ASSERT_DISABLED
1589     verify_live();
1590 #endif
1591   } else {
1592     tty->print("(SKIPPING roots, heapRegions, remset) ");
1593   }
1594 }
1595 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1596   return _free_regions->capacity();
1597 }
1598 
1599 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
1600   ObjectClosure* _cl;
1601 public:
1602   ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1603   bool doHeapRegion(ShenandoahHeapRegion* r) {
1604     ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
1605     return false;
1606   }
1607 };
1608 
1609 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1610   ShenandoahIterateObjectClosureRegionClosure blk(cl);
1611   heap_region_iterate(&blk, false, true);
1612 }
1613 
1614 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1615   Unimplemented();
1616 }
1617 
1618 void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl) {
1619   marked_object_iterate(region, cl, region->bottom(), region->top());
1620 }
1621 
1622 void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl,
1623                                            HeapWord* addr, HeapWord* limit) {
1624   addr += BrooksPointer::word_size();
1625   HeapWord* last_addr = NULL;
1626   size_t last_size = 0;
1627   HeapWord* top_at_mark_start = region->top_at_prev_mark_start();
1628   HeapWord* heap_end = _ordered_regions->end();
1629   while (addr < limit) {
1630     if (addr < top_at_mark_start) {
1631       HeapWord* end = top_at_mark_start + BrooksPointer::word_size();
1632       end = MIN2(end, heap_end);
1633       addr = _prev_mark_bit_map->getNextMarkedWordAddress(addr, end);
1634     }
1635     if (addr < limit) {
1636       oop obj = oop(addr);
1637       assert(is_marked_prev(obj), "object expected to be marked");
1638       cl->do_object(obj);
1639       last_addr = addr;
1640       last_size = obj->size();
1641       addr += obj->size() + BrooksPointer::word_size();
1642     } else {
1643       break;
1644     }
1645   }
1646 }
1647 
1648 // Apply blk->doHeapRegion() on all committed regions in address order,
1649 // terminating the iteration early if doHeapRegion() returns true.
1650 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
1651   for (size_t i = 0; i < _num_regions; i++) {
1652     ShenandoahHeapRegion* current  = _ordered_regions->get(i);
1653     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1654       continue;
1655     }
1656     if (skip_dirty_regions && current->is_in_collection_set()) {
1657       continue;
1658     }
1659     if (blk->doHeapRegion(current)) {
1660       return;
1661     }
1662   }
1663 }
1664 
1665 class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
1666   ShenandoahHeap* sh;
1667 public:
1668   ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
1669 
1670   bool doHeapRegion(ShenandoahHeapRegion* r) {
1671     r->clearLiveData();
1672     r->init_top_at_mark_start();
1673     return false;
1674   }
1675 };
1676 
1677 
1678 void ShenandoahHeap::start_concurrent_marking() {
1679 
1680   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
1681   accumulate_statistics_all_tlabs();
1682   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);
1683 
1684   set_concurrent_mark_in_progress(true);
1685   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1686   if (UseTLAB) {
1687     shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
1688     ensure_parsability(true);
1689     shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
1690   }
1691 
1692   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1693   _used_start_gc = used();
1694 
1695 #ifdef ASSERT
1696   if (ShenandoahDumpHeapBeforeConcurrentMark) {
1697     ensure_parsability(false);
1698     print_all_refs("pre-mark");
1699   }
1700 #endif
1701 
1702   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
1703   ClearLivenessClosure clc(this);
1704   heap_region_iterate(&clc);
1705   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);
1706 
1707   // print_all_refs("pre -mark");
1708 
1709   // oopDesc::_debug = true;
1710 
1711   // Make above changes visible to worker threads
1712   OrderAccess::fence();
1713 
1714   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
1715   concurrentMark()->init_mark_roots();
1716   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);
1717 
1718   //  print_all_refs("pre-mark2");
1719 }
1720 
1721 
1722 class VerifyLivenessClosure : public ExtendedOopClosure {
1723 
1724   ShenandoahHeap* _sh;
1725 
1726 public:
1727   VerifyLivenessClosure() : _sh ( ShenandoahHeap::heap() ) {}
1728 
1729   template<class T> void do_oop_nv(T* p) {
1730     T heap_oop = oopDesc::load_heap_oop(p);
1731     if (!oopDesc::is_null(heap_oop)) {
1732       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1733       guarantee(_sh->heap_region_containing(obj)->is_in_collection_set() == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1734                 "forwarded objects can only exist in dirty (from-space) regions is_dirty: %s, is_forwarded: %s",
1735                 BOOL_TO_STR(_sh->heap_region_containing(obj)->is_in_collection_set()),
1736                 BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)))
1737                 );
1738       obj = oopDesc::bs()->read_barrier(obj);
1739       guarantee(! _sh->heap_region_containing(obj)->is_in_collection_set(), "forwarded oops must not point to dirty regions");
1740       guarantee(obj->is_oop(), "is_oop");
1741       ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
1742       if (! sh->is_marked_current(obj)) {
1743         sh->print_on(tty);
1744       }
1745       assert(sh->is_marked_current(obj), "Referenced Objects should be marked obj: "PTR_FORMAT", marked: %s, is_in_heap: %s",
1746              p2i((HeapWord*) obj), BOOL_TO_STR(sh->is_marked_current(obj)), BOOL_TO_STR(sh->is_in(obj)));
1747     }
1748   }
1749 
1750   void do_oop(oop* p)       { do_oop_nv(p); }
1751   void do_oop(narrowOop* p) { do_oop_nv(p); }
1752 
1753 };
1754 
1755 void ShenandoahHeap::verify_live() {
1756 
1757   VerifyLivenessClosure cl;
1758   roots_iterate(&cl);
1759 
1760   IterateMarkedCurrentObjectsClosure marked_oops(&cl);
1761   object_iterate(&marked_oops);
1762 
1763 }
1764 
1765 class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
1766 
1767   ShenandoahHeap* _sh;
1768 
1769 public:
1770   VerifyAfterEvacuationClosure() : _sh ( ShenandoahHeap::heap() ) {}
1771 
1772   template<class T> void do_oop_nv(T* p) {
1773     T heap_oop = oopDesc::load_heap_oop(p);
1774     if (!oopDesc::is_null(heap_oop)) {
1775       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1776       guarantee(_sh->heap_region_containing(obj)->is_in_collection_set() == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
1777                 "forwarded objects can only exist in dirty (from-space) regions is_dirty: %s, is_forwarded: %s obj-klass: %s, marked: %s",
1778                 BOOL_TO_STR(_sh->heap_region_containing(obj)->is_in_collection_set()),
1779                 BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))), obj->klass()->external_name(), BOOL_TO_STR(_sh->is_marked_current(obj))
1780                 );
1781       obj = oopDesc::bs()->read_barrier(obj);
1782       guarantee(! _sh->heap_region_containing(obj)->is_in_collection_set(), "forwarded oops must not point to dirty regions");
1783       guarantee(obj->is_oop(), "is_oop");
1784       guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
1785     }
1786   }
1787 
1788   void do_oop(oop* p)       { do_oop_nv(p); }
1789   void do_oop(narrowOop* p) { do_oop_nv(p); }
1790 
1791 };
1792 
1793 class VerifyAfterUpdateRefsClosure : public ExtendedOopClosure {
1794 
1795   ShenandoahHeap* _sh;
1796 
1797 public:
1798   VerifyAfterUpdateRefsClosure() : _sh ( ShenandoahHeap::heap() ) {}
1799 
1800   template<class T> void do_oop_nv(T* p) {
1801     T heap_oop = oopDesc::load_heap_oop(p);
1802     if (!oopDesc::is_null(heap_oop)) {
1803       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1804       guarantee((! _sh->heap_region_containing(obj)->is_in_collection_set()),
1805                 "no live reference must point to from-space, is_marked: %s",
1806                 BOOL_TO_STR(_sh->is_marked_current(obj)));
1807       if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)) && _sh->is_in(p)) {
1808         tty->print_cr("top-limit: "PTR_FORMAT", p: "PTR_FORMAT, p2i(_sh->heap_region_containing(p)->concurrent_iteration_safe_limit()), p2i(p));
1809       }
1810       guarantee(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "no live reference must point to forwarded object");
1811       guarantee(obj->is_oop(), "is_oop");
1812       guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
1813     }
1814   }
1815 
1816   void do_oop(oop* p)       { do_oop_nv(p); }
1817   void do_oop(narrowOop* p) { do_oop_nv(p); }
1818 
1819 };
1820 
1821 void ShenandoahHeap::verify_heap_after_evacuation() {
1822 
1823   verify_heap_size_consistency();
1824 
1825   ensure_parsability(false);
1826 
1827   VerifyAfterEvacuationClosure cl;
1828   roots_iterate(&cl);
1829 
1830   IterateMarkedCurrentObjectsClosure marked_oops(&cl);
1831   object_iterate(&marked_oops);
1832 
1833 }
1834 
1835 class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
1836 public:
1837   bool doHeapRegion(ShenandoahHeapRegion* r) {
1838     assert(! r->is_in_collection_set(), "no region must be in collection set");
1839     assert(! ShenandoahHeap::heap()->in_cset_fast_test(r->bottom()), "no region must be in collection set");
1840     return false;
1841   }
1842 };
1843 
1844 void ShenandoahHeap::swap_mark_bitmaps() {
1845   CMBitMap* tmp = _prev_mark_bit_map;
1846   _prev_mark_bit_map = _next_mark_bit_map;
1847   _next_mark_bit_map = tmp;
1848 }
1849 
1850 void ShenandoahHeap::stop_concurrent_marking() {
1851   assert(concurrent_mark_in_progress(), "How else could we get here?");
1852   if (! cancelled_concgc()) {
1853     // If we needed to update refs, and concurrent marking has been cancelled,
1854     // we need to finish updating references.
1855     set_need_update_refs(false);
1856     swap_mark_bitmaps();
1857   }
1858   set_concurrent_mark_in_progress(false);
1859 
1860   if (log_is_enabled(Trace, gc, region)) {
1861     ResourceMark rm;
1862     outputStream* out = Log(gc, region)::trace_stream();
1863     print_heap_regions(out);
1864   }
1865 
1866 }
1867 
1868 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1869   _concurrent_mark_in_progress = in_progress;
1870   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, ! in_progress);
1871 }
1872 
1873 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1874   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
1875   _evacuation_in_progress = in_progress;
1876   OrderAccess::fence();
1877 }
1878 
1879 void ShenandoahHeap::verify_copy(oop p,oop c){
1880     assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
1881     assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
1882     if (p->klass() != c->klass()) {
1883       print_heap_regions();
1884     }
1885     assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
1886     assert(p->size() == c->size(), "verify size");
1887     // Object may have been locked between copy and verification
1888     //    assert(p->mark() == c->mark(), "verify mark");
1889     assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
1890   }
1891 
1892 void ShenandoahHeap::oom_during_evacuation() {
1893   // tty->print_cr("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
1894   //               Thread::current()->osthread()->thread_id());
1895 
1896   // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
1897   collector_policy()->set_should_clear_all_soft_refs(true);
1898   concurrent_thread()->schedule_full_gc();
1899   cancel_concgc();
1900 
1901   if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
1902     log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation settlded..");
1903     while (_evacuation_in_progress) { // wait.
1904       Thread::current()->_ParkEvent->park(1) ;
1905     }
1906   }
1907 
1908 }
1909 
1910 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1911   // Initialize Brooks pointer for the next object
1912   HeapWord* result = obj + BrooksPointer::word_size();
1913   BrooksPointer::initialize(oop(result));
1914   return result;
1915 }
1916 
1917 uint ShenandoahHeap::oop_extra_words() {
1918   return BrooksPointer::word_size();
1919 }
1920 
1921 void ShenandoahHeap::grow_heap_by(size_t num_regions) {
1922   size_t base = _num_regions;
1923   ensure_new_regions(num_regions);
1924 
1925   ShenandoahHeapRegion* regions[num_regions];
1926   for (size_t i = 0; i < num_regions; i++) {
1927     ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion();
1928     size_t new_region_index = i + base;
1929     HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * new_region_index;
1930     new_region->initialize_heap_region(start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, new_region_index);
1931 
1932     if (log_is_enabled(Trace, gc, region)) {
1933       ResourceMark rm;
1934       outputStream* out = Log(gc, region)::trace_stream();
1935       out->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
1936       new_region->print_on(out);
1937     }
1938 
1939     assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
1940     _ordered_regions->add_region(new_region);
1941     _sorted_regions->add_region(new_region);
1942     _in_cset_fast_test_base[new_region_index] = false; // Not in cset
1943     _top_at_mark_starts_base[new_region_index] = new_region->bottom();
1944 
1945     regions[i] = new_region;
1946   }
1947   _free_regions->par_add_regions(regions, 0, num_regions, num_regions);
1948 }
1949 
1950 void ShenandoahHeap::ensure_new_regions(size_t new_regions) {
1951 
1952   size_t num_regions = _num_regions;
1953   size_t new_num_regions = num_regions + new_regions;
1954   assert(new_num_regions <= _max_regions, "we checked this earlier");
1955 
1956   size_t expand_size = new_regions * ShenandoahHeapRegion::RegionSizeBytes;
1957   log_trace(gc, region)("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
1958   bool success = _storage.expand_by(expand_size, ShenandoahAlwaysPreTouch);
1959   assert(success, "should always be able to expand by requested size");
1960 
1961   _num_regions = new_num_regions;
1962 
1963 }
1964 
1965 ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
1966   _heap(ShenandoahHeap::heap_no_check()) {
1967 }
1968 
1969 void ShenandoahIsAliveClosure::init(ShenandoahHeap* heap) {
1970   _heap = heap;
1971 }
1972 
1973 bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
1974 
1975   assert(_heap != NULL, "sanity");
1976 #ifdef ASSERT
1977   if (_heap->concurrent_mark_in_progress()) {
1978     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
1979   }
1980 #endif
1981   assert(!oopDesc::is_null(obj), "null");
1982   return _heap->is_marked_current(obj);
1983 }
1984 
1985 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1986   _heap(ShenandoahHeap::heap_no_check()) {
1987 }
1988 
1989 void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
1990   _heap = heap;
1991 }
1992 
1993 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
1994 
1995   assert(_heap != NULL, "sanity");
1996   obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
1997 #ifdef ASSERT
1998   if (_heap->concurrent_mark_in_progress()) {
1999     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
2000   }
2001 #endif
2002   assert(!oopDesc::is_null(obj), "null");
2003   return _heap->is_marked_current(obj);
2004 }
2005 
2006 void ShenandoahHeap::ref_processing_init() {
2007   MemRegion mr = reserved_region();
2008 
2009   isAlive.init(ShenandoahHeap::heap());
2010   _ref_processor =
2011     new ReferenceProcessor(mr,    // span
2012                            ParallelRefProcEnabled,
2013                            // mt processing
2014                            (int) ConcGCThreads,
2015                            // degree of mt processing
2016                            true,
2017                            // mt discovery
2018                            (int) ConcGCThreads,
2019                            // degree of mt discovery
2020                            false,
2021                            // Reference discovery is not atomic
2022                            &isAlive);
2023 
2024 }
2025 
2026 #ifdef ASSERT
2027 void ShenandoahHeap::set_from_region_protection(bool protect) {
2028   for (uint i = 0; i < _num_regions; i++) {
2029     ShenandoahHeapRegion* region = _ordered_regions->get(i);
2030     if (region != NULL && region->is_in_collection_set()) {
2031       if (protect) {
2032         region->memProtectionOn();
2033       } else {
2034         region->memProtectionOff();
2035       }
2036     }
2037   }
2038 }
2039 #endif
2040 
2041 size_t ShenandoahHeap::num_regions() {
2042   return _num_regions;
2043 }
2044 
2045 size_t ShenandoahHeap::max_regions() {
2046   return _max_regions;
2047 }
2048 
2049 GCTracer* ShenandoahHeap::tracer() {
2050   return shenandoahPolicy()->tracer();
2051 }
2052 
2053 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2054   return _free_regions->used();
2055 }
2056 
2057 void ShenandoahHeap::cancel_concgc() {
2058   // only report it once
2059   if (!_cancelled_concgc) {
2060     log_info(gc)("Cancelling GC");
2061     _cancelled_concgc = true;
2062     OrderAccess::fence();
2063     _shenandoah_policy->report_concgc_cancelled();
2064   }
2065 
2066 }
2067 
2068 void ShenandoahHeap::clear_cancelled_concgc() {
2069   _cancelled_concgc = false;
2070 }
2071 
2072 int ShenandoahHeap::max_workers() {
2073   return _max_workers;
2074 }
2075 
2076 int ShenandoahHeap::max_parallel_workers() {
2077   return _max_parallel_workers;
2078 }
2079 int ShenandoahHeap::max_conc_workers() {
2080   return _max_conc_workers;
2081 }
2082 
2083 void ShenandoahHeap::stop() {
2084   // We set this early here, to let GC threads terminate before we ask the concurrent thread
2085   // to terminate, which would otherwise block until all GC threads come to finish normally.
2086   _cancelled_concgc = true;
2087   _concurrent_gc_thread->stop();
2088   cancel_concgc();
2089 }
2090 
2091 void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) {
2092 
2093   StringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols);
2094   workers()->run_task(&shenandoah_unlink_task);
2095 
2096   //  if (G1StringDedup::is_enabled()) {
2097   //    G1StringDedup::unlink(is_alive);
2098   //  }
2099 }
2100 
2101 void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
2102   _need_update_refs = need_update_refs;
2103 }
2104 
2105 //fixme this should be in heapregionset
2106 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2107   size_t region_idx = r->region_number() + 1;
2108   ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
2109   guarantee(next->region_number() == region_idx, "region number must match");
2110   while (next->is_humongous()) {
2111     region_idx = next->region_number() + 1;
2112     next = _ordered_regions->get(region_idx);
2113     guarantee(next->region_number() == region_idx, "region number must match");
2114   }
2115   return next;
2116 }
2117 
2118 bool ShenandoahHeap::is_in_collection_set(const void* p) {
2119   return heap_region_containing(p)->is_in_collection_set();
2120 }
2121 
2122 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2123   return _monitoring_support;
2124 }
2125 
2126 bool ShenandoahHeap::is_obj_dead(const oop obj, const ShenandoahHeapRegion* r) const {
2127   return ! r->allocated_after_prev_mark_start((HeapWord*) obj) &&
2128          ! is_marked_prev(obj, r);
2129 }
2130 CMBitMap* ShenandoahHeap::prev_mark_bit_map() {
2131   return _prev_mark_bit_map;
2132 }
2133 
2134 CMBitMap* ShenandoahHeap::next_mark_bit_map() {
2135   return _next_mark_bit_map;
2136 }
2137 
2138 void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
2139   _free_regions->add_region(r);
2140 }
2141 
2142 void ShenandoahHeap::clear_free_regions() {
2143   _free_regions->clear();
2144 }
2145 
2146 void ShenandoahHeap::register_region_with_in_cset_fast_test(ShenandoahHeapRegion* r) {
2147   assert(_in_cset_fast_test_base != NULL, "sanity");
2148   assert(r->is_in_collection_set(), "invariant");
2149   uint index = r->region_number();
2150   assert(index < _in_cset_fast_test_length, "invariant");
2151   assert(!_in_cset_fast_test_base[index], "invariant");
2152   _in_cset_fast_test_base[index] = true;
2153 }
2154 
2155 address ShenandoahHeap::in_cset_fast_test_addr() {
2156   return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
2157 }
2158 
2159 void ShenandoahHeap::clear_cset_fast_test() {
2160   assert(_in_cset_fast_test_base != NULL, "sanity");
2161   memset(_in_cset_fast_test_base, false,
2162          (size_t) _in_cset_fast_test_length * sizeof(bool));
2163 }
2164 
2165 size_t ShenandoahHeap::conservative_max_heap_alignment() {
2166   return HeapRegionBounds::max_size();
2167 }
2168 
2169 size_t ShenandoahHeap::bytes_allocated_since_cm() {
2170   return _bytes_allocated_since_cm;
2171 }
2172 
2173 void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
2174   _bytes_allocated_since_cm = bytes;
2175 }
2176 
2177 size_t ShenandoahHeap::max_allocated_gc() {
2178   return _max_allocated_gc;
2179 }
2180 
2181 void ShenandoahHeap::set_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2182   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2183   _top_at_mark_starts[index] = addr;
2184 }
2185 
2186 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2187   _full_gc_in_progress = in_progress;
2188 }
2189 
2190 bool ShenandoahHeap::is_full_gc_in_progress() const {
2191   return _full_gc_in_progress;
2192 }
2193 
2194 bool ShenandoahHeap::needs_reference_pending_list_locker_thread() const {
2195   return true;
2196 }
2197 
2198 class NMethodOopInitializer : public OopClosure {
2199 private:
2200   ShenandoahHeap* _heap;
2201 public:
2202   NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
2203   }
2204 
2205 private:
2206   template <class T>
2207   inline void do_oop_work(T* p) {
2208     T o = oopDesc::load_heap_oop(p);
2209     if (! oopDesc::is_null(o)) {
2210       oop obj1 = oopDesc::decode_heap_oop_not_null(o);
2211       oop obj2 = oopDesc::bs()->write_barrier(obj1);
2212       if (! oopDesc::unsafe_equals(obj1, obj2)) {
2213         oopDesc::encode_store_heap_oop(p, obj2);
2214       }
2215     }
2216   }
2217 
2218 public:
2219   void do_oop(oop* o) {
2220     do_oop_work(o);
2221   }
2222   void do_oop(narrowOop* o) {
2223     do_oop_work(o);
2224   }
2225 };
2226 
2227 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2228   NMethodOopInitializer init;
2229   nm->oops_do(&init);
2230   nm->fix_oop_relocations();
2231 }
2232 
2233 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2234 }
2235 
2236 void ShenandoahHeap::enter_critical(oop o) {
2237   heap_region_containing(o)->enter_critical();
2238 }
2239 
2240 void ShenandoahHeap::exit_critical(oop o) {
2241   heap_region_containing(o)->exit_critical();
2242 }
2243 
2244 
2245 GCTimer* ShenandoahHeap::gc_timer() const {
2246   return _gc_timer;
2247 }