/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "memory/allocation.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHumongous.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  HeapWord* cur = NULL;
  for (cur = start; cur < end; cur++) {
    tty->print_cr(PTR_FORMAT " : " PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  }
}

class PrintHeapRegionsClosure : public ShenandoahHeapRegionClosure {
private:
  outputStream* _st;
public:
  PrintHeapRegionsClosure() : _st(tty) {}
  PrintHeapRegionsClosure(outputStream* st) : _st(st) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->print_on(_st);
    return false;
  }
};

class ShenandoahPretouchTask : public AbstractGangTask {
private:
  char* volatile _cur_addr;
  char* const _start_addr;
  char* const _end_addr;
  size_t const _page_size;
public:
  ShenandoahPretouchTask(char* start_address, char* end_address, size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _cur_addr(start_address),
    _start_addr(start_address),
    _end_addr(end_address),
    _page_size(page_size) {
  }

  virtual void work(uint worker_id) {
    size_t const actual_chunk_size = MAX2(PreTouchParallelChunkSize, _page_size);
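    // Carve the range into chunks by atomically bumping the shared cursor;
    // each successful claim hands this worker one chunk to touch, so the
    // gang covers [_start_addr, _end_addr) in parallel without further locking.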
    while (true) {
      char* touch_addr = (char*)Atomic::add_ptr((intptr_t)actual_chunk_size, (volatile void*) &_cur_addr) - actual_chunk_size;
      if (touch_addr < _start_addr || touch_addr >= _end_addr) {
        break;
      }
      char* end_addr = touch_addr + MIN2(actual_chunk_size, pointer_delta(_end_addr, touch_addr, sizeof(char)));
      os::pretouch_memory(touch_addr, end_addr, _page_size);
    }
  }
};

void ShenandoahHeap::pretouch_storage(char* start, char* end, WorkGang* workers) {
  assert (ShenandoahAlwaysPreTouch, "Sanity");
  assert (!AlwaysPreTouch, "Should have been overridden");

  size_t size = (size_t)(end - start);
  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t num_chunks = MAX2((size_t)1, size / MAX2(PreTouchParallelChunkSize, page_size));
  uint num_workers = MIN2((uint)num_chunks, workers->active_workers());

  log_info(gc, heap)("Parallel pretouch with %u workers for " SIZE_FORMAT " work units pre-touching " SIZE_FORMAT " bytes.",
                      num_workers, num_chunks, size);

  ShenandoahPretouchTask cl(start, end, page_size);
  workers->run_task(&cl, num_workers);
}

jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 Arguments::conservative_max_heap_alignment());
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);
  if (ShenandoahAlwaysPreTouch) {
    pretouch_storage(_storage.low(), _storage.high(), _workers);
  }

  _num_regions = init_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _max_regions = max_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _initialSize = _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
  size_t regionSizeWords = ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _ordered_regions = new ShenandoahHeapRegionSet(_max_regions);
  _sorted_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahCollectionSet(_max_regions);
  _free_regions = new ShenandoahFreeSet(_max_regions);

  size_t i = 0;
  for (i = 0; i < _num_regions; i++) {
    ShenandoahHeapRegion* current = new ShenandoahHeapRegion();
    current->initialize_heap_region((HeapWord*) pgc_rs.base() +
                                    regionSizeWords * i, regionSizeWords, i);
    _free_regions->add_region(current);
    _ordered_regions->add_region(current);
    _sorted_regions->add_region(current);
  }
  assert(((size_t) _ordered_regions->active_regions()) == _num_regions, "");
  _first_region = _ordered_regions->get(0);
  _first_region_bottom = _first_region->bottom();
  assert((((size_t) _first_region_bottom) &
          (ShenandoahHeapRegion::RegionSizeBytes - 1)) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(_first_region_bottom));

  _numAllocs = 0;

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    log_trace(gc, region)("All Regions");
    _ordered_regions->print(out);
    log_trace(gc, region)("Free Regions");
    _free_regions->print(out);
  }

  // The call below uses machinery (the SATB* things) that lives in G1, but
  // probably belongs in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /* G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for prev and next bitmap.
  size_t bitmap_size = CMBitMap::compute_size(heap_rs.size());
  MemRegion heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap0(bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap0.base(), bitmap0.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  MemRegion bitmap_region0 = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);
  _mark_bit_map0.initialize(heap_region, bitmap_region0);
  _prev_mark_bit_map = &_mark_bit_map0;

  ReservedSpace bitmap1(bitmap_size, page_size);
  os::commit_memory_or_exit(bitmap1.base(), bitmap1.size(), false, "couldn't allocate mark bitmap");
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  MemRegion bitmap_region1 = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);
  _mark_bit_map1.initialize(heap_region, bitmap_region1);
  _next_mark_bit_map = &_mark_bit_map1;

  // Initialize fast collection set test structure.
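  // The table is biased so that it can be indexed directly by
  // (uintx(addr) >> RegionSizeShift): subtracting the shifted heap base
  // from the array base once, here, saves a subtraction on every query,
  // i.e. in_cset_fast_test(addr) reduces to _in_cset_fast_test[uintx(addr) >> RegionSizeShift].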
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
                   NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length, mtGC);
  _in_cset_fast_test = _in_cset_fast_test_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);
  clear_cset_fast_test();

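  // The top-at-mark-start array is biased the same way as the in-cset table.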
  _top_at_mark_starts_base =
                   NEW_C_HEAP_ARRAY(HeapWord*, _max_regions, mtGC);
  _top_at_mark_starts = _top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);

  for (i = 0; i < _num_regions; i++) {
    _in_cset_fast_test_base[i] = false; // Not in cset
    _top_at_mark_starts_base[i] = _ordered_regions->get(i)->bottom();
  }

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _concurrent_gc_thread = new ShenandoahConcurrentThread();

  ShenandoahMarkCompact::initialize();

  return JNI_OK;
}

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _max_allocated_gc(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_conc_workers((int) MAX2((uint) ConcGCThreads, 1U)),
  _max_parallel_workers((int) MAX2((uint) ParallelGCThreads, 1U)),
  _ref_processor(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _top_at_mark_starts(NULL),
  _top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _cancelled_concgc(false),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _growing_heap(0),
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
{
  log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: " UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  // This is odd: they are concurrent GC threads, but they are also task
  // threads, and the framework doesn't allow both.
  _workers = new WorkGang("Parallel GC Threads", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  _conc_workers = new WorkGang("Concurrent GC Threads", ConcGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  if ((_workers == NULL) || (_conc_workers == NULL)) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
    _conc_workers->initialize_workers();
  }
}

class ResetBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = region->top_prev_mark_bitmap();
      region->set_top_prev_mark_bitmap(region->top_at_prev_mark_start());
      if (top > bottom) {
        heap->reset_mark_bitmap_range(bottom, top);
      }
      region = _regions->claim_next();
    }
  }
};

class ResetPrevBitmapTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;

public:
  ResetPrevBitmapTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Parallel Reset Prev Bitmap Task"),
    _regions(regions) {
    _regions->clear_current_index();
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions->claim_next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      HeapWord* bottom = region->bottom();
      HeapWord* top = region->top_prev_mark_bitmap();
      if (top > bottom) {
        heap->reset_prev_mark_bitmap_range(bottom, top);
      }
      region = _regions->claim_next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap(WorkGang* workers) {
  GCTraceTime(Info, gc, phases) time("Concurrent reset bitmaps", gc_timer(), GCCause::_no_gc);

  ResetBitmapTask task = ResetBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

void ShenandoahHeap::reset_prev_mark_bitmap(WorkGang* workers) {
  GCTraceTime(Info, gc, phases) time("Concurrent reset prev bitmaps", gc_timer(), GCCause::_no_gc);

  ResetPrevBitmapTask task = ResetPrevBitmapTask(_ordered_regions);
  workers->run_task(&task);
}

void ShenandoahHeap::reset_mark_bitmap_range(HeapWord* from, HeapWord* to) {
  _next_mark_bit_map->clear_range(MemRegion(from, to));
}

void ShenandoahHeap::reset_prev_mark_bitmap_range(HeapWord* from, HeapWord* to) {
  _prev_mark_bit_map->clear_range(MemRegion(from, to));
}

bool ShenandoahHeap::is_bitmap_clear() {
  HeapWord* start = _ordered_regions->bottom();
  HeapWord* end = _ordered_regions->end();
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity() / K, used() / K);
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::RegionSizeBytes / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (cancelled_concgc()) {
    st->print("cancelled ");
  }
  st->print("\n");

  if (Verbose) {
    print_heap_regions(st);
  }
}

class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  if (UseTLAB) {
    InitGCLABClosure init_gclabs;
    for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
      init_gclabs.do_thread(thread);
    }
    gc_threads_do(&init_gclabs);
  }

  _max_workers = MAX2(_max_parallel_workers, _max_conc_workers);
  _scm->initialize(_max_workers);

  ref_processing_init();
}

class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
  size_t sum;
public:

  CalculateUsedRegionClosure() {
    sum = 0;
  }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    sum = sum + r->used();
    return false;
  }

  size_t getResult() { return sum; }
};

size_t ShenandoahHeap::calculateUsed() {
  CalculateUsedRegionClosure cl;
  heap_region_iterate(&cl);
  return cl.getResult();
}

void ShenandoahHeap::verify_heap_size_consistency() {
  assert(calculateUsed() == used(),
         "heap used size must be consistent. heap-used: " SIZE_FORMAT " regions-used: " SIZE_FORMAT, used(), calculateUsed());
}

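// _used is maintained with atomic adds from the allocation paths; the
// acquire/release fences below appear intended to order unlocked readers
// against set_used() from the GC. It is a statistics counter, not a
// synchronization point.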
size_t ShenandoahHeap::used() const {
  OrderAccess::acquire();
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(bytes, &_used);
}

void ShenandoahHeap::set_used(size_t bytes) {
  _used = bytes;
  OrderAccess::release();
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(_used >= bytes, "never decrease used by more than it has been increased");
  Atomic::add(-bytes, &_used);
}

size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::RegionSizeBytes;
}

size_t ShenandoahHeap::min_capacity() const {
  return _initialSize;
}

VirtualSpace* ShenandoahHeap::storage() const {
  return (VirtualSpace*) &_storage;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* first_region_bottom = _first_region->bottom();
  HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * _num_regions;
  return p >= first_region_bottom && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(const void* p) {
  return true;
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain the GCLAB and allocate the object in shared space if
  // the amount free in the GCLAB is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard the GCLAB and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

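  // Assumption, mirroring the TLAB sizing logic this code is modeled on:
  // compute_size() returns 0 when no usable GCLAB size can satisfy the
  // request, in which case we let the caller allocate directly in shared space.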
  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
  return allocate_new_tlab(word_size, false);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
  return allocate_new_tlab(word_size, true);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory(word_size, evacuating);

  if (result != NULL) {
    assert(! heap_region_containing(result)->is_in_collection_set(), "Never allocate in dirty region");
    _bytes_allocated_since_cm += word_size * HeapWordSize;

    log_develop_trace(gc, tlab)("allocating new tlab of size " SIZE_FORMAT " at addr " PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, bool evacuating) {
  HeapWord* result = allocate_memory_work(word_size);

  if (result == NULL) {
    bool retry;
    do {
      // Try to grow the heap.
      retry = check_grow_heap();
      result = allocate_memory_work(word_size);
    } while (retry && result == NULL);
  }

  if (result == NULL && ! evacuating) { // Allocation failed, try full-GC, then retry allocation.
    log_develop_trace(gc)("Failed to allocate " SIZE_FORMAT " bytes", word_size * HeapWordSize);
    collect(GCCause::_allocation_failure);
    result = allocate_memory_work(word_size);
  }

  // Only update monitoring counters when not calling from a write-barrier.
  // Otherwise we might attempt to grab the Service_lock, which we must
  // not do when coming from a write-barrier (because the thread might
  // already hold the Compile_lock).
  if (! evacuating) {
    monitoring_support()->update_counters();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size " SIZE_FORMAT " at addr " PTR_FORMAT " by thread %d ", word_size, p2i(result), Thread::current()->osthread()->thread_id());

  return result;
}

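// Interpretation of the call sites (an assumption, not spelled out in this
// file): an evacuating allocation on a Java thread can only have come from
// the write barrier slow path, which is why such allocations must avoid
// triggering GC or taking monitoring locks above.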
bool ShenandoahHeap::call_from_write_barrier(bool evacuating) {
  return evacuating && Thread::current()->is_Java_thread();
}

bool ShenandoahHeap::check_grow_heap() {
  assert(_free_regions->max_regions() >= _free_regions->active_regions(), "don't get negative");

  size_t available = _max_regions - _num_regions;
  if (available == 0) {
    return false; // Don't retry.
  }

  jbyte growing = Atomic::cmpxchg(1, &_growing_heap, 0);
  if (growing == 0) {
    // Only one thread succeeds this, and this one gets
    // to grow the heap. All other threads can continue
    // to allocate from the reserve.
    grow_heap_by(MIN2(available, ShenandoahAllocReserveRegions));

    // Reset it back to 0, so that other threads can take it again.
    Atomic::store(0, &_growing_heap);
    return true;
  } else {
    // Let other threads work, then try again.
    os::naked_yield();
    return true;
  }
}

HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {
  if (word_size * HeapWordSize > ShenandoahHeapRegion::RegionSizeBytes) {
    return allocate_large_memory(word_size);
  }

  // Coming out of a full GC, it is possible that there is no free region
  // available, so current_index may not be valid. Bail out early if the
  // free set cannot possibly satisfy the request.
  if (word_size * HeapWordSize > _free_regions->capacity()) return NULL;

  size_t current_idx = _free_regions->current_index();
  ShenandoahHeapRegion* my_current_region = _free_regions->get(current_idx);

  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }

#ifdef ASSERT
  if (my_current_region->is_in_collection_set()) {
    print_heap_regions();
  }
#endif
  assert(! my_current_region->is_in_collection_set(), "never get targeted regions in free-lists");
  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result = my_current_region->par_allocate(word_size);

  while (result == NULL) {
    // 2nd attempt. Try next region.
    current_idx = _free_regions->par_claim_next(current_idx);
    my_current_region = _free_regions->get(current_idx);

    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    assert(! my_current_region->is_in_collection_set(), "never get targeted regions in free-lists");
    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->par_allocate(word_size);
  }

  my_current_region->increase_live_data(word_size * HeapWordSize);
  increase_used(word_size * HeapWordSize);
  _free_regions->increase_used(word_size * HeapWordSize);
  return result;
}

HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
  uint required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
  if (required_regions > _max_regions) return NULL;

  ShenandoahHeapRegion* r = _free_regions->claim_contiguous(required_regions);

  HeapWord* result = NULL;

  if (r != NULL)  {
    result = r->bottom();

    log_debug(gc, humongous)("allocating humongous object of size: " SIZE_FORMAT " KB at location " PTR_FORMAT " in start region " SIZE_FORMAT,
                             (words * HeapWordSize) / K, p2i(result), r->region_number());
  } else {
    log_debug(gc, humongous)("allocating humongous object of size: " SIZE_FORMAT " KB at location " PTR_FORMAT " failed",
                             (words * HeapWordSize) / K, p2i(result));
  }

  return result;
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
#ifdef ASSERT
  if (ShenandoahVerify && _numAllocs > 1000000) {
    _numAllocs = 0;
  }
  _numAllocs++;
#endif
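  // Brooks forwarding pointer: each object is preceded by one extra word
  // that holds the forwarding pointer, so allocate room for it and return
  // the address one word past the start of the allocation.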
  HeapWord* filler = allocate_memory(BrooksPointer::word_size() + size, false);
  if (filler != NULL) {
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));
    _bytes_allocated_since_cm += size * HeapWordSize;

    assert(! heap_region_containing(result)->is_in_collection_set(), "never allocate in targeted region");
    return result;
  } else {
    /*
    tty->print_cr("Out of memory. Requested number of words: "SIZE_FORMAT" used heap: "INT64_FORMAT", bytes allocated since last CM: "INT64_FORMAT, size, used(), _bytes_allocated_since_cm);
    {
      print_heap_regions();
      tty->print("Printing "SIZE_FORMAT" free regions:\n", _free_regions->count());
      _free_regions->print();
    }
    */
    return NULL;
  }
}

class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {
  }

  void do_object(oop p) {
    log_develop_trace(gc, compaction)("Calling ParallelEvacuateRegionObjectClosure on " PTR_FORMAT " of size %d", p2i((HeapWord*) p), p->size());

    assert(_heap->is_marked_prev(p), "expect only marked objects");
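    // Evacuate only objects that are not yet forwarded; if the resolved
    // oop differs from p, another thread has already evacuated it.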
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

#ifdef ASSERT
class VerifyEvacuatedObjectClosure : public ObjectClosure {
public:
  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_prev(p)) {
      oop p_prime = oopDesc::bs()->read_barrier(p);
      assert(! oopDesc::unsafe_equals(p, p_prime), "Should point to evacuated copy");
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
      assert(p->klass() == p_prime->klass(), "Should have the same class p: " PTR_FORMAT ", p_prime: " PTR_FORMAT, p2i((HeapWord*) p), p2i((HeapWord*) p_prime));
      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      assert(oopDesc::unsafe_equals(p_prime, oopDesc::bs()->read_barrier(p_prime)), "One forward once");
    }
  }
};

void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
  VerifyEvacuatedObjectClosure verify_evacuation;
  marked_next_object_iterate(from_region, &verify_evacuation);
}
#endif

void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {
  assert(from_region->get_live_data() > 0, "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

  marked_prev_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  if (ShenandoahVerify && ! cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
#endif
}

class ParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* _sh;
  ShenandoahCollectionSet* _cs;

public:
  ParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs) {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* from_hr = _cs->claim_next();

    while (from_hr != NULL) {
      log_develop_trace(gc, region)("Thread " UINT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
                                    worker_id,
                                    from_hr->region_number());

      assert(from_hr->get_live_data() > 0, "all-garbage regions are reclaimed early");
      _sh->parallel_evacuate_region(from_hr);

      if (_sh->cancelled_concgc()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, from_hr->region_number());
        break;
      }
      from_hr = _cs->claim_next();
    }
  }
};

class RecycleDirtyRegionsClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
  size_t _bytes_reclaimed;
public:
  RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()), _bytes_reclaimed(0) {}

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    if (_heap->cancelled_concgc()) {
      // The aborted marking bitmap needs to be cleared at the end of cycle.
      // Setup the top-marker for this.
      r->set_top_prev_mark_bitmap(r->top_at_mark_start());

      return false;
    }

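    // Swap the top-at-mark-start markers so the just-completed cycle's
    // "next" marker becomes "prev" for the following cycle.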
    r->swap_top_at_mark_start();

    if (r->is_in_collection_set()) {
      log_develop_trace(gc, region)("Recycling region " SIZE_FORMAT ":", r->region_number());
      _heap->decrease_used(r->used());
      _bytes_reclaimed += r->used();
      r->recycle();
      _heap->free_regions()->add_region(r);
    }

    return false;
  }

  size_t bytes_reclaimed() { return _bytes_reclaimed; }
  void clear_bytes_reclaimed() { _bytes_reclaimed = 0; }
};

void ShenandoahHeap::recycle_dirty_regions() {
  RecycleDirtyRegionsClosure cl;
  cl.clear_bytes_reclaimed();

  heap_region_iterate(&cl);

  _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
  if (! cancelled_concgc()) {
    clear_cset_fast_test();
  }
}

ShenandoahFreeSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}

void ShenandoahHeap::print_heap_regions(outputStream* st) const {
  _ordered_regions->print(st);
}

class PrintAllRefsOopClosure: public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT " (marked: %s) (%s " PTR_FORMAT ")", _prefix, _index, p2i(p), p2i((HeapWord*) o), BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(o)), o->klass()->internal_name(), p2i(o->klass()));
      } else {
        //        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT" dirty: %s) -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty (%s))", _prefix, _index, p2i(p), BOOL_TO_STR(ShenandoahHeap::heap()->heap_region_containing(p)->is_in_collection_set()), p2i((HeapWord*) o), BOOL_TO_STR(ShenandoahHeap::heap()->heap_region_containing(o)->is_in_collection_set()));
        tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT " dirty) -> " PTR_FORMAT " (not in heap, possibly corrupted or dirty)", _prefix, _index, p2i(p), p2i((HeapWord*) o));
      }
    } else {
      tty->print_cr("%s " INT32_FORMAT " (" PTR_FORMAT ") -> " PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class PrintAllRefsObjectClosure : public ObjectClosure {
  const char* _prefix;

public:
  PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_in(p)) {
      tty->print_cr("%s object " PTR_FORMAT " (marked: %s) (%s " PTR_FORMAT ") refers to:", _prefix, p2i((HeapWord*) p), BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(p)), p->klass()->internal_name(), p2i(p->klass()));
      PrintAllRefsOopClosure cl(_prefix);
      p->oop_iterate(&cl);
    }
  }
};

void ShenandoahHeap::print_all_refs(const char* prefix) {
  tty->print_cr("printing all references in the heap");
  tty->print_cr("root references:");

  ensure_parsability(false);

  PrintAllRefsOopClosure cl(prefix);
  roots_iterate(&cl);

  tty->print_cr("heap references:");
  PrintAllRefsObjectClosure cl2(prefix);
  object_iterate(&cl2);
}

class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop o = oopDesc::load_decode_heap_oop(p);
    if (o != NULL) {
      if (! _heap->is_marked_prev(o)) {
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: " PTR_FORMAT ": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_prev(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr " PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (! oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o))) {
        tty->print_cr("oops has forwardee: p: " PTR_FORMAT " (%s), o = " PTR_FORMAT " (%s), new-o: " PTR_FORMAT " (%s)", p2i(p), BOOL_TO_STR(_heap->heap_region_containing(p)->is_in_collection_set()), p2i((HeapWord*) o),  BOOL_TO_STR(_heap->heap_region_containing(o)->is_in_collection_set()), p2i((HeapWord*) oopDesc::bs()->read_barrier(o)), BOOL_TO_STR(_heap->heap_region_containing(oopDesc::bs()->read_barrier(o))->is_in_collection_set()));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(oopDesc::unsafe_equals(o, oopDesc::bs()->read_barrier(o)), "oops must not be forwarded");
      assert(! _heap->heap_region_containing(o)->is_in_collection_set(), "references must not point to dirty heap regions");
      assert(_heap->is_marked_prev(o), "live oops must be marked in the prev bitmap");
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class IterateMarkedCurrentObjectsClosure: public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  ExtendedOopClosure* _cl;
public:
  IterateMarkedCurrentObjectsClosure(ExtendedOopClosure* cl) :
    _heap(ShenandoahHeap::heap()), _cl(cl) {}

  void do_object(oop p) {
    if (_heap->is_marked_current(p)) {
      p->oop_iterate(_cl);
    }
  }
};

void ShenandoahHeap::verify_heap_after_marking() {
  verify_heap_size_consistency();

  log_trace(gc)("verifying heap after marking");

  VerifyAfterMarkingOopClosure cl;
  roots_iterate(&cl);

  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
  object_iterate(&marked_oops);
}

void ShenandoahHeap::reclaim_humongous_region_at(ShenandoahHeapRegion* r) {
  assert(r->is_humongous_start(), "reclaim regions starting with the first one");

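  // The humongous object starts one Brooks-pointer word past the region
  // bottom; include that word when computing how many regions it spans.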
  oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  uint required_regions = ShenandoahHumongous::required_regions(size * HeapWordSize);
  uint index = r->region_number();

  assert(r->get_live_data() == 0, "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    ShenandoahHeapRegion* region = _ordered_regions->get(index++);

    assert((region->is_humongous_start() || region->is_humongous_continuation()),
           "expect correct humongous start or continuation");

    if (log_is_enabled(Debug, gc, humongous)) {
      log_debug(gc, humongous)("reclaiming " UINT32_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words", required_regions, size);
      ResourceMark rm;
      outputStream* out = Log(gc, humongous)::debug_stream();
      region->print_on(out);
    }

    region->reset();
    ShenandoahHeap::heap()->decrease_used(ShenandoahHeapRegion::RegionSizeBytes);
  }
}

class ShenandoahReclaimHumongousRegionsClosure : public ShenandoahHeapRegionClosure {

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! heap->is_marked_prev(humongous_obj)) {
        heap->reclaim_humongous_region_at(r);
      }
    }
    return false;
  }
};

#ifdef ASSERT
class CheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(!r->is_in_collection_set(), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");

  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {

    recycle_dirty_regions();

    ensure_parsability(true);

#ifdef ASSERT
    if (ShenandoahVerify) {
      verify_heap_after_marking();
    }
#endif

    // NOTE: This needs to be done during a stop-the-world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.

    _collection_set->clear();
    _free_regions->clear();

    ShenandoahReclaimHumongousRegionsClosure reclaim;
    heap_region_iterate(&reclaim);

    // _ordered_regions->print();
#ifdef ASSERT
    CheckCollectionSetClosure ccsc;
    _ordered_regions->heap_region_iterate(&ccsc);
#endif

    _shenandoah_policy->choose_collection_set(_collection_set);

    _shenandoah_policy->choose_free_set(_free_regions);

    /*
    tty->print("Sorted free regions\n");
    _free_regions->print();
    */

    if (_collection_set->count() == 0) {
      cancel_concgc();
    }

    _bytes_allocated_since_cm = 0;

    Universe::update_heap_info_at_gc();
  }
}

class RetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  RetireTLABClosure(bool retire) : _retire(retire) {
  }

  void do_thread(Thread* thread) {
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);

    RetireTLABClosure cl(retire_tlabs);
    for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
      cl.do_thread(thread);
    }
    gc_threads_do(&cl);
  }
}

class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_cset_fast_test((HeapWord*) obj)) {
        assert(_heap->is_marked_prev(obj), "only evacuate marked objects %d %d", _heap->is_marked_prev(obj), _heap->is_marked_prev(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacuateUpdateRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(false);
  }

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    ShenandoahRootEvacuator rp(this, _max_parallel_workers, ShenandoahCollectorPolicy::evac_thread_roots);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(true);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

void ShenandoahHeap::do_evacuation() {
  parallel_evacuate();

  if (ShenandoahVerify && ! cancelled_concgc()) {
    VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
    if (Thread::current()->is_VM_thread()) {
      verify_after_evacuation.doit();
    } else {
      VMThread::execute(&verify_after_evacuation);
    }
  }
}

void ShenandoahHeap::parallel_evacuate() {
  if (! cancelled_concgc()) {
    log_develop_trace(gc)("starting parallel_evacuate");

    _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

    if (log_is_enabled(Trace, gc, region)) {
      ResourceMark rm;
      outputStream* out = Log(gc, region)::trace_stream();
      out->print("Printing all available regions");
      print_heap_regions(out);
    }

    if (log_is_enabled(Trace, gc, cset)) {
      ResourceMark rm;
      outputStream* out = Log(gc, cset)::trace_stream();
      out->print("Printing collection set which contains " SIZE_FORMAT " regions:\n", _collection_set->count());
      _collection_set->print(out);

      out->print("Printing free set which contains " SIZE_FORMAT " regions:\n", _free_regions->count());
      _free_regions->print(out);
    }

    ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);

    conc_workers()->run_task(&evacuationTask);

    if (log_is_enabled(Trace, gc, cset)) {
      ResourceMark rm;
      outputStream* out = Log(gc, cset)::trace_stream();
      out->print("Printing postgc collection set which contains " SIZE_FORMAT " regions:\n",
                 _collection_set->count());
      _collection_set->print(out);

      out->print("Printing postgc free regions which contain " SIZE_FORMAT " free regions:\n",
                 _free_regions->count());
      _free_regions->print(out);
    }

    if (log_is_enabled(Trace, gc, region)) {
      ResourceMark rm;
      outputStream* out = Log(gc, region)::trace_stream();
      out->print_cr("all regions after evacuation:");
      print_heap_regions(out);
    }

    _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);

    if (cancelled_concgc()) {
      // tty->print("GOTCHA: by thread %d", Thread::current()->osthread()->thread_id());
      concurrent_thread()->schedule_full_gc();
      // tty->print("PostGotcha: by thread %d FullGC should be scheduled\n",
      //            Thread::current()->osthread()->thread_id());
    }
  }
}

class VerifyEvacuationClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;
  ShenandoahHeapRegion* _from_region;

public:
  VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
    _heap(ShenandoahHeap::heap()), _from_region(from_region) { }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    oop heap_oop = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(heap_oop)) {
      guarantee(! _from_region->is_in(heap_oop), "no references to from-region allowed after evacuation: " PTR_FORMAT, p2i((HeapWord*) heap_oop));
    }
  }

public:
  void do_oop(oop* p)       {
    do_oop_work(p);
  }

  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl);

  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahRootProcessor rp(this, 1);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
}

void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
  VerifyEvacuationClosure rootsCl(from_region);
  roots_iterate(&rootsCl);
}

bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}

size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread* thread) const {
  size_t idx = _free_regions->current_index();
  ShenandoahHeapRegion* current = _free_regions->get(idx);
  if (current == NULL) {
    return 0;
  } else if (current->free() > MinTLABSize) {
    return current->free();
  } else {
    return MinTLABSize;
  }
}

size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::RegionSizeBytes;
}

class ResizeGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().resize();
  }
};

void ShenandoahHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();

  ResizeGCLABClosure cl;
  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
    cl.do_thread(thread);
  }
  gc_threads_do(&cl);
}

class AccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().accumulate_statistics();
    thread->gclab().initialize_statistics();
  }
};

void ShenandoahHeap::accumulate_statistics_all_gclabs() {
  AccumulateStatisticsGCLABClosure cl;
  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
    cl.do_thread(thread);
  }
  gc_threads_do(&cl);
}

bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
  return true;
}

oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // Overridden to do nothing.
  return new_obj;
}

bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return true;
}

bool ShenandoahHeap::card_mark_must_follow_store() const {
  return false;
}

void ShenandoahHeap::collect(GCCause::Cause cause) {
  assert(cause != GCCause::_gc_locker, "no JNI critical callback");
  if (GCCause::is_user_requested_gc(cause)) {
    if (! DisableExplicitGC) {
      cancel_concgc();
      _concurrent_gc_thread->do_full_gc(cause);
    }
  } else if (cause == GCCause::_allocation_failure) {
    cancel_concgc();
    collector_policy()->set_should_clear_all_soft_refs(true);
    _concurrent_gc_thread->do_full_gc(cause);
  }
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}

AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;
}

CollectorPolicy* ShenandoahHeap::collector_policy() const {
  return _shenandoah_policy;
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  Space* sp = heap_region_containing(addr);
  if (sp != NULL) {
    return sp->block_start(addr);
  }
  return NULL;
}

size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  return sp->block_is_obj(addr);
}

jlong ShenandoahHeap::millis_since_last_gc() {
  return 0;
}
1509 
1510 void ShenandoahHeap::prepare_for_verify() {
1511   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1512     ensure_parsability(false);
1513   }
1514 }
1515 
1516 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1517   workers()->print_worker_threads_on(st);
1518   conc_workers()->print_worker_threads_on(st);
1519 }
1520 
1521 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1522   workers()->threads_do(tcl);
1523   conc_workers()->threads_do(tcl);
1524 }
1525 
1526 void ShenandoahHeap::print_tracing_info() const {
1527   if (log_is_enabled(Info, gc, stats)) {
1528     ResourceMark rm;
1529     outputStream* out = Log(gc, stats)::info_stream();
1530     _shenandoah_policy->print_tracing_info(out);
1531   }
1532 }
1533 
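// Verifies oops reachable from the roots: every non-NULL root must reference
// a valid oop. A bad reference is recorded in _failures and also trips the
// guarantee below.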
1534 class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
1535 private:
1536   ShenandoahHeap*  _heap;
1537   VerifyOption     _vo;
1538   bool             _failures;
1539 public:
1540   // _vo == UsePrevMarking -> use "prev" marking information,
1541   // _vo == UseNextMarking -> use "next" marking information,
1542   // _vo == UseMarkWord    -> use mark word from object header.
1543   ShenandoahVerifyRootsClosure(VerifyOption vo) :
1544     _heap(ShenandoahHeap::heap()),
1545     _vo(vo),
1546     _failures(false) { }
1547 
1548   bool failures() { return _failures; }
1549 
1550 private:
1551   template <class T>
  inline void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (! oopDesc::is_null(obj) && ! obj->is_oop()) {
      // Record the failure and dump the offending location for debugging.
      _failures = true;
      tty->print_cr("Root location "PTR_FORMAT" points to invalid oop "PTR_FORMAT,
                    p2i(p), p2i((void*) obj));
    }
    guarantee(obj->is_oop_or_null(), "is oop or null");
  }
1563 
1564 public:
1565   void do_oop(oop* p)       {
1566     do_oop_work(p);
1567   }
1568 
1569   void do_oop(narrowOop* p) {
1570     do_oop_work(p);
1571   }
1572 
1573 };
1574 
1575 class ShenandoahVerifyHeapClosure: public ObjectClosure {
1576 private:
1577   ShenandoahVerifyRootsClosure _rootsCl;
1578 public:
  ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
    _rootsCl(rc) {}
1581 
1582   void do_object(oop p) {
1583     _rootsCl.do_oop(&p);
1584   }
1585 };
1586 
1587 class ShenandoahVerifyKlassClosure: public KlassClosure {
1588   OopClosure *_oop_closure;
1589  public:
1590   ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {}
1591   void do_klass(Klass* k) {
1592     k->oops_do(_oop_closure);
1593   }
1594 };
1595 
1596 void ShenandoahHeap::verify(VerifyOption vo) {
1597   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1598 
1599     ShenandoahVerifyRootsClosure rootsCl(vo);
1600 
1601     assert(Thread::current()->is_VM_thread(),
1602            "Expected to be executed serially by the VM thread at this point");
1603 
1604     roots_iterate(&rootsCl);
1605 
1606     bool failures = rootsCl.failures();
1607     log_trace(gc)("verify failures: %s", BOOL_TO_STR(failures));
1608 
1609     ShenandoahVerifyHeapClosure heapCl(rootsCl);
1610 
1611     object_iterate(&heapCl);
1612     // TODO: Implement rest of it.
1613 #ifdef ASSERT_DISABLED
1614     verify_live();
1615 #endif
1616   } else {
1617     tty->print("(SKIPPING roots, heapRegions, remset) ");
1618   }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1621   return _free_regions->capacity();
1622 }
1623 
1624 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
1625   ObjectClosure* _cl;
1626 public:
1627   ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1628   bool doHeapRegion(ShenandoahHeapRegion* r) {
1629     ShenandoahHeap::heap()->marked_prev_object_iterate(r, _cl);
1630     return false;
1631   }
1632 };
1633 
1634 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1635   ShenandoahIterateObjectClosureRegionClosure blk(cl);
1636   heap_region_iterate(&blk, false, true);
1637 }
1638 
1639 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1640   Unimplemented();
1641 }
1642 
// Apply blk->doHeapRegion() on all committed regions in address order,
// terminating the iteration early if doHeapRegion() returns true. Optionally
// skips collection-set (dirty) regions and humongous continuation regions.
1645 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
1646   for (size_t i = 0; i < _num_regions; i++) {
1647     ShenandoahHeapRegion* current  = _ordered_regions->get(i);
1648     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1649       continue;
1650     }
1651     if (skip_dirty_regions && current->is_in_collection_set()) {
1652       continue;
1653     }
1654     if (blk->doHeapRegion(current)) {
1655       return;
1656     }
1657   }
1658 }
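
// An illustrative sketch (not part of the build): counting humongous regions
// with a closure, using the single-argument form seen elsewhere in this file.
// The closure name is for the sketch only.
//
//   class CountHumongousClosure : public ShenandoahHeapRegionClosure {
//   public:
//     size_t _count;
//     CountHumongousClosure() : _count(0) {}
//     bool doHeapRegion(ShenandoahHeapRegion* r) {
//       if (r->is_humongous()) _count++;
//       return false; // false means: continue the iteration
//     }
//   };
//
//   CountHumongousClosure cl;
//   ShenandoahHeap::heap()->heap_region_iterate(&cl);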
1659 
1660 class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
1661   ShenandoahHeap* sh;
1662 public:
1663   ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
1664 
1665   bool doHeapRegion(ShenandoahHeapRegion* r) {
1666     r->clear_live_data();
1667     r->init_top_at_mark_start();
1668     return false;
1669   }
1670 };
1671 
1672 
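// Sets up a concurrent marking cycle: retire TLABs so that the heap is
// parsable and no marks are lost on objects still sitting in TLABs, reset
// per-region liveness data and top-at-mark-start, publish those changes with
// a fence, and finally scan the root set.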
void ShenandoahHeap::start_concurrent_marking() {
1675   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
1676   accumulate_statistics_all_tlabs();
1677   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);
1678 
1679   set_concurrent_mark_in_progress(true);
1680   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1681   if (UseTLAB) {
1682     shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
1683     ensure_parsability(true);
1684     shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
1685   }
1686 
1687   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1688   _used_start_gc = used();
1689 
1690 #ifdef ASSERT
1691   if (ShenandoahDumpHeapBeforeConcurrentMark) {
1692     ensure_parsability(false);
1693     print_all_refs("pre-mark");
1694   }
1695 #endif
1696 
1697   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
1698   ClearLivenessClosure clc(this);
1699   heap_region_iterate(&clc);
1700   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);
1701 
1706   // Make above changes visible to worker threads
1707   OrderAccess::fence();
1708 
1709   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
1710   concurrentMark()->init_mark_roots();
1711   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);
1714 }
1715 
1716 
1717 class VerifyLivenessClosure : public ExtendedOopClosure {
1718 
1719   ShenandoahHeap* _sh;
1720 
1721 public:
1722   VerifyLivenessClosure() : _sh ( ShenandoahHeap::heap() ) {}
1723 
1724   template<class T> void do_oop_nv(T* p) {
1725     T heap_oop = oopDesc::load_heap_oop(p);
1726     if (!oopDesc::is_null(heap_oop)) {
1727       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1728       guarantee(_sh->heap_region_containing(obj)->is_in_collection_set() == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                "forwarded objects can only exist in dirty (from-space) regions; is_dirty: %s, is_forwarded: %s",
1730                 BOOL_TO_STR(_sh->heap_region_containing(obj)->is_in_collection_set()),
1731                 BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)))
1732                 );
1733       obj = oopDesc::bs()->read_barrier(obj);
1734       guarantee(! _sh->heap_region_containing(obj)->is_in_collection_set(), "forwarded oops must not point to dirty regions");
1735       guarantee(obj->is_oop(), "is_oop");
1736       ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
1737       if (! sh->is_marked_current(obj)) {
1738         sh->print_on(tty);
1739       }
      assert(sh->is_marked_current(obj), "referenced objects should be marked; obj: "PTR_FORMAT", marked: %s, is_in_heap: %s",
             p2i((HeapWord*) obj), BOOL_TO_STR(sh->is_marked_current(obj)), BOOL_TO_STR(sh->is_in(obj)));
1742     }
1743   }
1744 
1745   void do_oop(oop* p)       { do_oop_nv(p); }
1746   void do_oop(narrowOop* p) { do_oop_nv(p); }
1747 
1748 };
1749 
void ShenandoahHeap::verify_live() {
  VerifyLivenessClosure cl;
  roots_iterate(&cl);

  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
  object_iterate(&marked_oops);
}
1759 
1760 class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
1761 
1762   ShenandoahHeap* _sh;
1763 
1764 public:
1765   VerifyAfterEvacuationClosure() : _sh ( ShenandoahHeap::heap() ) {}
1766 
1767   template<class T> void do_oop_nv(T* p) {
1768     T heap_oop = oopDesc::load_heap_oop(p);
1769     if (!oopDesc::is_null(heap_oop)) {
1770       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1771       guarantee(_sh->heap_region_containing(obj)->is_in_collection_set() == (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))),
                "forwarded objects can only exist in dirty (from-space) regions; is_dirty: %s, is_forwarded: %s, obj-klass: %s, marked: %s",
1773                 BOOL_TO_STR(_sh->heap_region_containing(obj)->is_in_collection_set()),
1774                 BOOL_TO_STR(! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj))), obj->klass()->external_name(), BOOL_TO_STR(_sh->is_marked_current(obj))
1775                 );
1776       obj = oopDesc::bs()->read_barrier(obj);
1777       guarantee(! _sh->heap_region_containing(obj)->is_in_collection_set(), "forwarded oops must not point to dirty regions");
1778       guarantee(obj->is_oop(), "is_oop");
1779       guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
1780     }
1781   }
1782 
1783   void do_oop(oop* p)       { do_oop_nv(p); }
1784   void do_oop(narrowOop* p) { do_oop_nv(p); }
1785 
1786 };
1787 
1788 class VerifyAfterUpdateRefsClosure : public ExtendedOopClosure {
1789 
1790   ShenandoahHeap* _sh;
1791 
1792 public:
1793   VerifyAfterUpdateRefsClosure() : _sh ( ShenandoahHeap::heap() ) {}
1794 
1795   template<class T> void do_oop_nv(T* p) {
1796     T heap_oop = oopDesc::load_heap_oop(p);
1797     if (!oopDesc::is_null(heap_oop)) {
1798       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1799       guarantee((! _sh->heap_region_containing(obj)->is_in_collection_set()),
1800                 "no live reference must point to from-space, is_marked: %s",
1801                 BOOL_TO_STR(_sh->is_marked_current(obj)));
1802       if (! oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)) && _sh->is_in(p)) {
1803         tty->print_cr("top-limit: "PTR_FORMAT", p: "PTR_FORMAT, p2i(_sh->heap_region_containing(p)->concurrent_iteration_safe_limit()), p2i(p));
1804       }
1805       guarantee(oopDesc::unsafe_equals(obj, oopDesc::bs()->read_barrier(obj)), "no live reference must point to forwarded object");
1806       guarantee(obj->is_oop(), "is_oop");
1807       guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
1808     }
1809   }
1810 
1811   void do_oop(oop* p)       { do_oop_nv(p); }
1812   void do_oop(narrowOop* p) { do_oop_nv(p); }
1813 
1814 };
1815 
void ShenandoahHeap::verify_heap_after_evacuation() {
  verify_heap_size_consistency();

  ensure_parsability(false);

  VerifyAfterEvacuationClosure cl;
  roots_iterate(&cl);

  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
  object_iterate(&marked_oops);
}
1829 
1830 class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
1831 public:
1832   bool doHeapRegion(ShenandoahHeapRegion* r) {
1833     assert(! r->is_in_collection_set(), "no region must be in collection set");
1834     assert(! ShenandoahHeap::heap()->in_cset_fast_test(r->bottom()), "no region must be in collection set");
1835     return false;
1836   }
1837 };
1838 
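// After marking completes, the "next" bitmap holds the fresh marking
// information; swapping makes it the "prev" bitmap that liveness queries
// consult, and recycles the old one for the next cycle.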
1839 void ShenandoahHeap::swap_mark_bitmaps() {
1840   CMBitMap* tmp = _prev_mark_bit_map;
1841   _prev_mark_bit_map = _next_mark_bit_map;
1842   _next_mark_bit_map = tmp;
1843 }
1844 
void ShenandoahHeap::stop_concurrent_marking() {
  assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // Marking completed normally: reference updating is not required, and the
    // freshly completed "next" bitmap becomes the "prev" bitmap.
    set_need_update_refs(false);
    swap_mark_bitmaps();
  }
  set_concurrent_mark_in_progress(false);

  if (log_is_enabled(Trace, gc, region)) {
    ResourceMark rm;
    outputStream* out = Log(gc, region)::trace_stream();
    print_heap_regions(out);
  }
}
1862 
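// Toggling concurrent marking also activates (or deactivates) the SATB
// queues of all Java threads, so that mutators record the previous values
// of overwritten references while marking is running.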
1863 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1864   _concurrent_mark_in_progress = in_progress ? 1 : 0;
1865   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1866 }
1867 
1868 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1869   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
1870   _evacuation_in_progress = in_progress ? 1 : 0;
1871   OrderAccess::fence();
1872 }
1873 
void ShenandoahHeap::verify_copy(oop p, oop c) {
  assert(! oopDesc::unsafe_equals(p, oopDesc::bs()->read_barrier(p)), "forwarded correctly");
  assert(oopDesc::unsafe_equals(oopDesc::bs()->read_barrier(p), c), "verify pointer is correct");
  if (p->klass() != c->klass()) {
    print_heap_regions();
  }
  assert(p->klass() == c->klass(), "verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size());
  assert(p->size() == c->size(), "verify size");
  // Object may have been locked between copy and verification.
  // assert(p->mark() == c->mark(), "verify mark");
  assert(oopDesc::unsafe_equals(c, oopDesc::bs()->read_barrier(c)), "verify only forwarded once");
}
1886 
1887 void ShenandoahHeap::oom_during_evacuation() {
1888   log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
1889                         Thread::current()->osthread()->thread_id());
1890 
1891   // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
1892   collector_policy()->set_should_clear_all_soft_refs(true);
1893   concurrent_thread()->schedule_full_gc();
1894   cancel_concgc();
1895 
1896   if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
1897     log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation finishes.");
1898     while (_evacuation_in_progress) { // wait.
1899       Thread::current()->_ParkEvent->park(1);
1900     }
  }
}
1904 
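// Each object is preceded by a Brooks forwarding pointer occupying
// BrooksPointer::word_size() heap words in front of the object header:
//
//   obj                               -> forwarding pointer word(s)
//   obj + BrooksPointer::word_size()  -> object header (the address returned)
//
// The forwarding pointer is initialized to point back at the object itself.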
1905 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1906   // Initialize Brooks pointer for the next object
1907   HeapWord* result = obj + BrooksPointer::word_size();
1908   BrooksPointer::initialize(oop(result));
1909   return result;
1910 }
1911 
1912 uint ShenandoahHeap::oop_extra_words() {
1913   return BrooksPointer::word_size();
1914 }
1915 
1916 void ShenandoahHeap::grow_heap_by(size_t num_regions) {
1917   size_t base = _num_regions;
1918   ensure_new_regions(num_regions);
1919 
  // Note: a variable-length array is a GCC extension; use a C-heap array to
  // stay portable across HotSpot toolchains.
  ShenandoahHeapRegion** regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, num_regions, mtGC);
1921   for (size_t i = 0; i < num_regions; i++) {
1922     ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion();
1923     size_t new_region_index = i + base;
1924     HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * new_region_index;
1925     new_region->initialize_heap_region(start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, new_region_index);
1926 
1927     if (log_is_enabled(Trace, gc, region)) {
1928       ResourceMark rm;
1929       outputStream* out = Log(gc, region)::trace_stream();
1930       out->print_cr("allocating new region at index: "SIZE_FORMAT, new_region_index);
1931       new_region->print_on(out);
1932     }
1933 
1934     assert(_ordered_regions->active_regions() == new_region->region_number(), "must match");
1935     _ordered_regions->add_region(new_region);
1936     _sorted_regions->add_region(new_region);
1937     _in_cset_fast_test_base[new_region_index] = false; // Not in cset
1938     _top_at_mark_starts_base[new_region_index] = new_region->bottom();
1939 
1940     regions[i] = new_region;
1941   }
  _free_regions->par_add_regions(regions, 0, num_regions, num_regions);
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegion*, regions);
1943 }
1944 
void ShenandoahHeap::ensure_new_regions(size_t new_regions) {
  size_t num_regions = _num_regions;
  size_t new_num_regions = num_regions + new_regions;
  assert(new_num_regions <= _max_regions, "we checked this earlier");

  size_t expand_size = new_regions * ShenandoahHeapRegion::RegionSizeBytes;
  log_trace(gc, region)("expanding storage by "SIZE_FORMAT_HEX" bytes, for "SIZE_FORMAT" new regions", expand_size, new_regions);
  bool success = _storage.expand_by(expand_size, ShenandoahAlwaysPreTouch);
  assert(success, "should always be able to expand by requested size");

  _num_regions = new_num_regions;
}
1959 
1960 ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
1961   _heap(ShenandoahHeap::heap_no_check()) {
1962 }
1963 
1964 void ShenandoahIsAliveClosure::init(ShenandoahHeap* heap) {
1965   _heap = heap;
1966 }
1967 
bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
1971 #ifdef ASSERT
1972   if (_heap->concurrent_mark_in_progress()) {
1973     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
1974   }
1975 #endif
1976   assert(!oopDesc::is_null(obj), "null");
1977   return _heap->is_marked_current(obj);
1978 }
1979 
1980 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1981   _heap(ShenandoahHeap::heap_no_check()) {
1982 }
1983 
1984 void ShenandoahForwardedIsAliveClosure::init(ShenandoahHeap* heap) {
1985   _heap = heap;
1986 }
1987 
bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
1991   obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
1992 #ifdef ASSERT
1993   if (_heap->concurrent_mark_in_progress()) {
1994     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
1995   }
1996 #endif
1997   assert(!oopDesc::is_null(obj), "null");
1998   return _heap->is_marked_current(obj);
1999 }
2000 
void ShenandoahHeap::ref_processing_init() {
  MemRegion mr = reserved_region();

  isAlive.init(ShenandoahHeap::heap());
  assert(_max_workers > 0, "Sanity");

  _ref_processor =
    new ReferenceProcessor(mr,                      // span
                           ParallelRefProcEnabled,  // mt processing
                           _max_workers,            // degree of mt processing
                           true,                    // mt discovery
                           _max_workers,            // degree of mt discovery
                           false,                   // reference discovery is not atomic
                           &isAlive);               // is-alive closure
}
2021 
2022 #ifdef ASSERT
2023 void ShenandoahHeap::set_from_region_protection(bool protect) {
  for (size_t i = 0; i < _num_regions; i++) {
2025     ShenandoahHeapRegion* region = _ordered_regions->get(i);
2026     if (region != NULL && region->is_in_collection_set()) {
2027       if (protect) {
2028         region->memProtectionOn();
2029       } else {
2030         region->memProtectionOff();
2031       }
2032     }
2033   }
2034 }
2035 #endif
2036 
2037 size_t ShenandoahHeap::num_regions() {
2038   return _num_regions;
2039 }
2040 
2041 size_t ShenandoahHeap::max_regions() {
2042   return _max_regions;
2043 }
2044 
2045 GCTracer* ShenandoahHeap::tracer() {
2046   return shenandoahPolicy()->tracer();
2047 }
2048 
2049 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2050   return _free_regions->used();
2051 }
2052 
void ShenandoahHeap::cancel_concgc() {
  // Only report the cancellation once.
  if (! cancelled_concgc()) {
    log_info(gc)("Cancelling GC");
    set_cancelled_concgc(true);
    _shenandoah_policy->report_concgc_cancelled();
  }
}
2062 
2063 void ShenandoahHeap::clear_cancelled_concgc() {
2064   set_cancelled_concgc(false);
2065 }
2066 
2067 uint ShenandoahHeap::max_workers() {
2068   return _max_workers;
2069 }
2070 
2071 uint ShenandoahHeap::max_parallel_workers() {
2072   return _max_parallel_workers;
}

uint ShenandoahHeap::max_conc_workers() {
2075   return _max_conc_workers;
2076 }
2077 
2078 void ShenandoahHeap::stop() {
2079   // We set this early here, to let GC threads terminate before we ask the concurrent thread
2080   // to terminate, which would otherwise block until all GC threads come to finish normally.
2081   set_cancelled_concgc(true);
2082   _concurrent_gc_thread->stop();
2083   cancel_concgc();
2084 }
2085 
void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) {
  StringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols);
  workers()->run_task(&shenandoah_unlink_task);

  // TODO: Also unlink deduplicated strings, along the lines of
  // G1StringDedup::unlink(is_alive), once string deduplication is supported.
}
2095 
2096 void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
2097   _need_update_refs = need_update_refs;
2098 }
2099 
// FIXME: This should live in the heap region set code.
2101 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2102   size_t region_idx = r->region_number() + 1;
2103   ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
2104   guarantee(next->region_number() == region_idx, "region number must match");
2105   while (next->is_humongous()) {
2106     region_idx = next->region_number() + 1;
2107     next = _ordered_regions->get(region_idx);
2108     guarantee(next->region_number() == region_idx, "region number must match");
2109   }
2110   return next;
2111 }
2112 
2113 bool ShenandoahHeap::is_in_collection_set(const void* p) {
2114   return heap_region_containing(p)->is_in_collection_set();
2115 }
2116 
2117 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2118   return _monitoring_support;
2119 }
2120 
2121 bool ShenandoahHeap::is_obj_dead(const oop obj, const ShenandoahHeapRegion* r) const {
2122   return ! r->allocated_after_prev_mark_start((HeapWord*) obj) &&
2123          ! is_marked_prev(obj, r);
}

CMBitMap* ShenandoahHeap::prev_mark_bit_map() {
2126   return _prev_mark_bit_map;
2127 }
2128 
2129 CMBitMap* ShenandoahHeap::next_mark_bit_map() {
2130   return _next_mark_bit_map;
2131 }
2132 
2133 void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
2134   _free_regions->add_region(r);
2135 }
2136 
2137 void ShenandoahHeap::clear_free_regions() {
2138   _free_regions->clear();
2139 }
2140 
2141 void ShenandoahHeap::register_region_with_in_cset_fast_test(ShenandoahHeapRegion* r) {
2142   assert(_in_cset_fast_test_base != NULL, "sanity");
2143   assert(r->is_in_collection_set(), "invariant");
2144   size_t index = r->region_number();
2145   assert(index < _in_cset_fast_test_length, "invariant");
2146   assert(!_in_cset_fast_test_base[index], "invariant");
2147   _in_cset_fast_test_base[index] = true;
2148 }
2149 
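// Raw addresses of the collection-set fast-test array and the cancellation
// flag, for code (e.g. generated barrier stubs) that must test them directly.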
2150 address ShenandoahHeap::in_cset_fast_test_addr() {
2151   return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
2152 }
2153 
2154 address ShenandoahHeap::cancelled_concgc_addr() {
2155   return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
2156 }
2157 
2158 void ShenandoahHeap::clear_cset_fast_test() {
2159   assert(_in_cset_fast_test_base != NULL, "sanity");
2160   memset(_in_cset_fast_test_base, false,
2161          _in_cset_fast_test_length * sizeof(bool));
2162 }
2163 
2164 size_t ShenandoahHeap::conservative_max_heap_alignment() {
2165   return HeapRegionBounds::max_size();
2166 }
2167 
2168 size_t ShenandoahHeap::bytes_allocated_since_cm() {
2169   return _bytes_allocated_since_cm;
2170 }
2171 
2172 void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
2173   _bytes_allocated_since_cm = bytes;
2174 }
2175 
2176 size_t ShenandoahHeap::max_allocated_gc() {
2177   return _max_allocated_gc;
2178 }
2179 
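// The top-at-mark-start table is indexed by region: shifting a region's base
// address right by RegionSizeShift yields its slot. For example, with
// hypothetical 2 MB regions (RegionSizeShift == 21) and a heap starting at
// address 0, a region base of 0x40400000 maps to slot 0x40400000 >> 21 == 514.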
2180 void ShenandoahHeap::set_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2181   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::RegionSizeShift;
2182   _top_at_mark_starts[index] = addr;
2183 }
2184 
2185 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2186   _full_gc_in_progress = in_progress;
2187 }
2188 
2189 bool ShenandoahHeap::is_full_gc_in_progress() const {
2190   return _full_gc_in_progress;
2191 }
2192 
2193 class NMethodOopInitializer : public OopClosure {
2194 private:
2195   ShenandoahHeap* _heap;
2196 public:
2197   NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
2198   }
2199 
2200 private:
2201   template <class T>
2202   inline void do_oop_work(T* p) {
2203     T o = oopDesc::load_heap_oop(p);
2204     if (! oopDesc::is_null(o)) {
2205       oop obj1 = oopDesc::decode_heap_oop_not_null(o);
2206       oop obj2 = oopDesc::bs()->write_barrier(obj1);
2207       if (! oopDesc::unsafe_equals(obj1, obj2)) {
2208         oopDesc::encode_store_heap_oop(p, obj2);
2209       }
2210     }
2211   }
2212 
2213 public:
2214   void do_oop(oop* o) {
2215     do_oop_work(o);
2216   }
2217   void do_oop(narrowOop* o) {
2218     do_oop_work(o);
2219   }
2220 };
2221 
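// Registering an nmethod updates its embedded oops to their to-space copies
// (via the write barrier) and fixes the oop relocations so that the code
// stream matches.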
2222 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2223   NMethodOopInitializer init;
2224   nm->oops_do(&init);
2225   nm->fix_oop_relocations();
2226 }
2227 
void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
  // Nothing to undo: register_nmethod() only fixes up the nmethod's oops and
  // records no per-nmethod state.
}
2230 
2231 void ShenandoahHeap::pin_object(oop o) {
2232   heap_region_containing(o)->pin();
2233 }
2234 
2235 void ShenandoahHeap::unpin_object(oop o) {
2236   heap_region_containing(o)->unpin();
2237 }
2238 
2240 GCTimer* ShenandoahHeap::gc_timer() const {
2241   return _gc_timer;
2242 }