1 /*
   2  * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "asm/macroAssembler.hpp"
  26 
  27 #include "classfile/symbolTable.hpp"
  28 #include "classfile/stringTable.hpp"
  29 
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "gc/shared/cmBitMap.inline.hpp"
  32 #include "gc/shared/gcHeapSummary.hpp"
  33 #include "gc/shared/gcTimer.hpp"
  34 #include "gc/shared/gcTrace.hpp"
  35 #include "gc/shared/gcTraceTime.hpp"
  36 #include "gc/shared/isGCActiveMark.hpp"
  37 
  38 #include "gc/shenandoah/brooksPointer.hpp"
  39 #include "gc/shenandoah/shenandoahHumongous.hpp"
  40 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  41 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  42 #include "gc/shenandoah/shenandoahJNICritical.hpp"
  43 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  44 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "runtime/vmThread.hpp"
  47 #include "memory/iterator.hpp"
  48 #include "memory/oopFactory.hpp"
  49 #include "gc/shared/referenceProcessor.hpp"
  50 #include "gc/shared/space.inline.hpp"
  51 #include "gc/shared/threadLocalAllocBuffer.inline.hpp"
  52 #include "memory/universe.hpp"
  53 #include "utilities/copy.hpp"
  54 #include "gc/shared/vmGCOperations.hpp"
  55 #include "runtime/atomic.inline.hpp"
  56 
  57 #define __ masm->
  58 
  59 ShenandoahHeap* ShenandoahHeap::_pgc = NULL;
  60 
  61 void ShenandoahHeap::print_heap_locations(HeapWord* start, HeapWord* end) {
  62   HeapWord* cur = NULL;
  63   for (cur = start; cur < end; cur++) {
  64     tty->print_cr(PTR_FORMAT" : "PTR_FORMAT, p2i(cur), p2i(*((HeapWord**) cur)));
  65   }
  66 }
  67 
  68 void ShenandoahHeap::print_heap_objects(HeapWord* start, HeapWord* end) {
  69   HeapWord* cur = NULL;
  70   for (cur = start; cur < end; cur = cur + oop(cur)->size()) {
  71     oop(cur)->print();
  72     print_heap_locations(cur, cur + oop(cur)->size());
  73   }
  74 }
  75 
  76 void ShenandoahHeap::print_heap_object(oop p) {
  77   HeapWord* hw = (HeapWord*) p;
  78   print_heap_locations(hw-1, hw+1+p->size());
  79 }
  80 
  81 
  82 class PrintHeapRegionsClosure : public
  83    ShenandoahHeapRegionClosure {
  84 private:
  85   outputStream* _st;
  86 public:
  87   PrintHeapRegionsClosure() : _st(tty) {}
  88   PrintHeapRegionsClosure(outputStream* st) : _st(st) {}
  89 
  90   bool doHeapRegion(ShenandoahHeapRegion* r) {
  91     r->print_on(_st);
  92     return false;
  93   }
  94 };
  95 
// Region closure that prints a summary line for each region followed by
// every object (and its raw words) contained in that region.
class PrintHeapObjectsClosure : public ShenandoahHeapRegionClosure {
public:
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    tty->print_cr("Region "INT32_FORMAT" top = "PTR_FORMAT" used = "SIZE_FORMAT_HEX" free = "SIZE_FORMAT_HEX,
               r->region_number(), p2i(r->top()), r->used(), r->free());

    ShenandoahHeap::heap()->print_heap_objects(r->bottom(), r->top());
    return false; // never abort the iteration
  }
};
 106 
// One-time heap setup: reserves address space for the maximum heap size,
// commits the initial size, carves the committed part into regions, and
// creates the supporting structures (region sets, SATB queues, mark
// bitmap, fast collection-set test table, concurrent GC thread).
jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();
  if (ShenandoahGCVerbose)
    tty->print_cr("init_byte_size = "SIZE_FORMAT","SIZE_FORMAT_HEX"  max_byte_size = "INT64_FORMAT","SIZE_FORMAT_HEX,
             init_byte_size, init_byte_size, max_byte_size, max_byte_size);

  // Both the initial and the maximum heap size must be region-aligned.
  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::RegionSizeBytes,
                            "shenandoah heap");

  // Reserve address space for the maximum heap size...
  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 Arguments::conservative_max_heap_alignment());
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet());
  // ...but commit only the initial size for now.
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  _storage.initialize(pgc_rs, init_byte_size);
  if (ShenandoahGCVerbose) {
    tty->print_cr("Calling initialize on reserved space base = "PTR_FORMAT" end = "PTR_FORMAT,
               p2i(pgc_rs.base()), p2i(pgc_rs.base() + pgc_rs.size()));
  }

  // The region array is sized for the maximum heap; only the first
  // _num_regions slots are populated here, the rest on heap growth.
  _num_regions = init_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _max_regions = max_byte_size / ShenandoahHeapRegion::RegionSizeBytes;
  _ordered_regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _max_regions, mtGC);
  for (size_t i = 0; i < _max_regions; i++) {
    _ordered_regions[i] = NULL;
  }

  _initialSize = _num_regions * ShenandoahHeapRegion::RegionSizeBytes;
  size_t regionSizeWords = ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
  assert(init_byte_size == _initialSize, "tautology");
  _free_regions = new ShenandoahHeapRegionSet(_max_regions);
  _collection_set = new ShenandoahHeapRegionSet(_max_regions);

  // Create the initial regions; all of them start out free.
  for (size_t i = 0; i < _num_regions; i++) {
    ShenandoahHeapRegion* current = new ShenandoahHeapRegion();
    current->initialize_heap_region((HeapWord*) pgc_rs.base() +
                                    regionSizeWords * i, regionSizeWords, i);
    _free_regions->append(current);
    _ordered_regions[i] = current;
  }
  _first_region = _ordered_regions[0];
  _first_region_bottom = _first_region->bottom();
  assert((((size_t) _first_region_bottom) & (ShenandoahHeapRegion::RegionSizeBytes - 1)) == 0, err_msg("misaligned heap: "PTR_FORMAT, p2i(_first_region_bottom)));

  _numAllocs = 0;

  if (ShenandoahGCVerbose) {
    tty->print("All Regions\n");
    print_heap_regions();
    tty->print("Free Regions\n");
    _free_regions->print();
  }

  // The call below uses stuff (the SATB* things) that are in G1, but probably
  // belong into a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /*G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for prev and next bitmap.
  size_t bitmap_size = CMBitMap::compute_size(heap_rs.size());
  MemRegion heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  ReservedSpace bitmap(ReservedSpace::allocation_align_size_up(bitmap_size));
  os::commit_memory_or_exit(bitmap.base(), bitmap.size(), false, err_msg("couldn't allocate mark bitmap"));
  MemRegion bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _mark_bit_map.initialize(heap_region, bitmap_region);

  _next_mark_bit_map = &_mark_bit_map;
  reset_mark_bitmap();

  // Initialize fast collection set test structure.
  // The pointer is biased by the heap base so it can be indexed directly
  // with (address >> RegionSizeShift), without subtracting the base first.
  _in_cset_fast_test_length = _max_regions;
  _in_cset_fast_test_base =
                   NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
  _in_cset_fast_test = _in_cset_fast_test_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::RegionSizeShift);
  clear_cset_fast_test();

  _concurrent_gc_thread = new ShenandoahConcurrentThread();
  return JNI_OK;
}
 198 
// Construct the heap: record the collector policy, zero/NULL the
// bookkeeping fields, and create the parallel and concurrent worker gangs.
ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(false),
  _evacuation_in_progress(false),
  _update_references_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytesAllocSinceCM(0),
  _bytes_allocated_during_cm(0),
  _max_allocated_gc(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_conc_workers((int) MAX2((uint) ConcGCThreads, 1U)),
  _max_parallel_workers((int) MAX2((uint) ParallelGCThreads, 1U)),
  _ref_processor(NULL),
  _in_cset_fast_test(NULL),
  _in_cset_fast_test_base(NULL),
  _mark_bit_map(),
  _cancelled_concgc(false),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _jni_critical(new ShenandoahJNICritical())

{
  if (ShenandoahLogConfig) {
    tty->print_cr("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
    tty->print_cr("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
    tty->print_cr("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
  }
  // Publish the singleton used by ShenandoahHeap::heap().
  _pgc = this;
  _scm = new ShenandoahConcurrentMark();
  _used = 0;
  // This is odd.  They are concurrent gc threads, but they are also task threads.
  // Framework doesn't allow both.
  // NOTE(review): both gangs carry the name "Concurrent GC Threads";
  // presumably the first was meant to be "Parallel GC Threads" — confirm.
  _workers = new WorkGang("Concurrent GC Threads", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  _conc_workers = new WorkGang("Concurrent GC Threads", ConcGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  if ((_workers == NULL) || (_conc_workers == NULL)) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
    _conc_workers->initialize_workers();
  }
}
 247 
 248 class ResetBitmapTask : public AbstractGangTask {
 249 private:
 250   ShenandoahHeapRegionSet* _regions;
 251 
 252 public:
 253   ResetBitmapTask(ShenandoahHeapRegionSet* regions) :
 254     AbstractGangTask("Parallel Reset Bitmap Task"),
 255     _regions(regions) {
 256   }
 257 
 258   void work(uint worker_id) {
 259     ShenandoahHeapRegion* region = _regions->claim_next();
 260     ShenandoahHeap* heap = ShenandoahHeap::heap();
 261     while (region != NULL) {
 262       heap->reset_mark_bitmap_range(region->bottom(), region->end());
 263       region = _regions->claim_next();
 264     }
 265   }
 266 };
 267 
 268 void ShenandoahHeap::reset_mark_bitmap() {
 269   if (ShenandoahTracePhases) {
 270     tty->print_cr("Shenandoah starting concurrent reset bitmaps");
 271   }
 272   ShenandoahHeapRegionSet regions = ShenandoahHeapRegionSet(_num_regions, _ordered_regions, _num_regions);
 273   ResetBitmapTask task = ResetBitmapTask(&regions);
 274   conc_workers()->set_active_workers(_max_conc_workers);
 275   conc_workers()->run_task(&task);
 276   if (ShenandoahTracePhases) {
 277     tty->print_cr("Shenandoah finishing concurrent reset bitmaps");
 278   }
 279 }
 280 
 281 void ShenandoahHeap::reset_mark_bitmap_range(HeapWord* from, HeapWord* to) {
 282   _next_mark_bit_map->clearRange(MemRegion(from, to));
 283 }
 284 
 285 bool ShenandoahHeap::is_bitmap_clear() {
 286   HeapWord* start = _ordered_regions[0]->bottom();
 287   HeapWord* end = _ordered_regions[_num_regions-1]->end();
 288   return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
 289 }
 290 
// Print a one-line heap summary (capacity, used, region size) plus the
// currently active GC-phase flags; with -Verbose also dump all regions.
void ShenandoahHeap::print_on(outputStream* st) const {
  st->print("Shenandoah Heap");
  st->print(" total = " SIZE_FORMAT " K, used " SIZE_FORMAT " K ", capacity()/ K, used() /K);
  st->print("Region size = " SIZE_FORMAT "K ", ShenandoahHeapRegion::RegionSizeBytes / K);
  if (_concurrent_mark_in_progress) {
    st->print("marking ");
  }
  if (_evacuation_in_progress) {
    st->print("evacuating ");
  }
  if (_update_references_in_progress) {
    st->print("updating-refs ");
  }
  if (_cancelled_concgc) {
    st->print("cancelled ");
  }
  st->print("\n");

  if (Verbose) {
    print_heap_regions(st);
  }
}
 313 
// Thread closure that (re)initializes the GC local allocation buffer
// (GCLAB) of each visited thread.
class InitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};
 320 
// Second-phase initialization, run after VM bootstrap: set up GCLABs for
// all existing threads, initialize concurrent marking and reference
// processing, and compute the overall worker-thread maximum.
void ShenandoahHeap::post_initialize() {

  {
    // Threads_lock keeps the Java thread list stable while we walk it.
    MutexLockerEx ml(Threads_lock);
    InitGCLABClosure init_gclabs;
    // Initialize GCLABs for all Java threads...
    for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
      init_gclabs.do_thread(thread);
    }
    // ...and for the GC worker threads as well.
    gc_threads_do(&init_gclabs);
  }
  _scm->initialize();

  ref_processing_init();

  _max_workers = MAX(_max_parallel_workers, _max_conc_workers);
}
 337 
 338 class CalculateUsedRegionClosure : public ShenandoahHeapRegionClosure {
 339   size_t sum;
 340 public:
 341 
 342   CalculateUsedRegionClosure() {
 343     sum = 0;
 344   }
 345 
 346   bool doHeapRegion(ShenandoahHeapRegion* r) {
 347     sum = sum + r->used();
 348     return false;
 349   }
 350 
 351   size_t getResult() { return sum;}
 352 };
 353 
 354 size_t ShenandoahHeap::calculateUsed() {
 355   CalculateUsedRegionClosure cl;
 356   heap_region_iterate(&cl);
 357   return cl.getResult();
 358 }
 359 
 360 size_t ShenandoahHeap::calculateFree() {
 361   return capacity() - calculateUsed();
 362 }
 363 
// Assert that the incrementally maintained _used counter agrees with the
// per-region sum computed by calculateUsed().
void ShenandoahHeap::verify_heap_size_consistency() {

  assert(calculateUsed() == used(),
         err_msg("heap used size must be consistent heap-used: "SIZE_FORMAT" regions-used: "SIZE_FORMAT, used(), calculateUsed()));
}
 369 
// Bytes currently in use, maintained incrementally by the allocation paths.
size_t ShenandoahHeap::used() const {
  return _used;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  // NOTE(review): plain += is not atomic; an atomic variant is commented
  // out below — presumably callers serialize on the heap lock. Confirm.
  _used += bytes;
  // Atomic::add_ptr(bytes, &_used);
}

void ShenandoahHeap::set_used(size_t bytes) {
  _used = bytes;
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(_used >= bytes, "never decrease heap size by more than we've left");
  _used -= bytes;

  // Atomic::add_ptr(-bytes, &_used);
}

// Committed capacity: only currently materialized regions are counted.
size_t ShenandoahHeap::capacity() const {
  return _num_regions * ShenandoahHeapRegion::RegionSizeBytes;

}

// Not implemented for Shenandoah; aborts if ever called.
bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

// Capacity if the heap grows to its maximum number of regions.
size_t ShenandoahHeap::max_capacity() const {
  return _max_regions * ShenandoahHeapRegion::RegionSizeBytes;
}
 403 
 404 class IsInRegionClosure : public ShenandoahHeapRegionClosure {
 405   const void* _p;
 406   bool _result;
 407 public:
 408 
 409   IsInRegionClosure(const void* p) {
 410     _p = p;
 411     _result = false;
 412   }
 413 
 414   bool doHeapRegion(ShenandoahHeapRegion* r) {
 415     if (r->is_in(_p)) {
 416       _result = true;
 417       return true;
 418     }
 419     return false;
 420   }
 421 
 422   bool result() { return _result;}
 423 };
 424 
 425 bool ShenandoahHeap::is_in(const void* p) const {
 426   //  IsInRegionClosure isIn(p);
 427   //  heap_region_iterate(&isIn);
 428   //  bool result = isIn.result();
 429 
 430   //  return isIn.result();
 431   HeapWord* first_region_bottom = _first_region->bottom();
 432   HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * _num_regions;
 433   return p > _first_region_bottom && p < last_region_end;
 434 }
 435 
// Not implemented for Shenandoah; aborts if ever called.
bool ShenandoahHeap::is_in_partial_collection(const void* p ) {
  Unimplemented();
  return false;
}

// Every address is reported scavengable; the commented-out lines show the
// earlier unimplemented stub.
bool  ShenandoahHeap::is_scavengable(const void* p) {
  //  nyi();
  //  return false;
  return true;
}
 446 
 447 HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) {
 448   if (UseTLAB) {
 449   HeapWord* obj = thread->gclab().allocate(size);
 450   if (obj != NULL) {
 451     return obj;
 452   }
 453   // Otherwise...
 454   return allocate_from_gclab_slow(thread, size);
 455   } else {
 456     return NULL;
 457   }
 458 }
 459 
// GCLAB refill path: either keep the current GCLAB (when its remaining
// free space is still worth keeping) and fail this request, or retire it
// and allocate a fresh GCLAB sized for the request. Returns a pointer to
// 'size' words inside the new GCLAB, or NULL on failure.
HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  // Install the new GCLAB; the first 'size' words satisfy this request.
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}
 500 
// Mutator TLAB: allocated with mark = true (see the two-arg overload).
HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
  return allocate_new_tlab(word_size, true);
}

// GCLAB: allocated with mark = false, i.e. without eager bitmap marking.
HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
  return allocate_new_tlab(word_size, false);
}
 508 
// Shared implementation for TLAB/GCLAB allocation. When 'mark' is set and
// marking (or early-update-refs evacuation) is in progress, the whole lab
// is marked up front so individual objects allocated from it need not be
// marked one by one.
HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size, bool mark) {
  HeapWord* result = allocate_memory(word_size);

  if (result != NULL) {
    if (mark && (_concurrent_mark_in_progress ||
                 (shenandoahPolicy()->update_refs_early() && _evacuation_in_progress))) {
      // We mark the whole tlab here, this way we avoid marking every single
      // allocated object. We mark it from the 2nd word, because the 1st word is always
      // the brooks ptr of the first object, and it confuses the fast marked-iterator
      // if we mark that.
      _next_mark_bit_map->parMarkRange(MemRegion(result + BrooksPointer::BROOKS_POINTER_OBJ_SIZE,
                                                 word_size - BrooksPointer::BROOKS_POINTER_OBJ_SIZE));
    }
    assert(! heap_region_containing(result)->is_in_collection_set(), "Never allocate in dirty region");
    _bytesAllocSinceCM += word_size * HeapWordSize;

#ifdef ASSERT
    if (ShenandoahTraceTLabs)
      tty->print_cr("allocating new tlab of size "SIZE_FORMAT" at addr "PTR_FORMAT, word_size, p2i(result));
#endif

  }
  return result;
}
 533 
 534 ShenandoahHeap* ShenandoahHeap::heap() {
 535   assert(_pgc != NULL, "Unitialized access to ShenandoahHeap::heap()");
 536   assert(_pgc->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
 537   return _pgc;
 538 }
 539 
// VM operation that verifies the heap at a safepoint (debugging aid).
class VM_ShenandoahVerifyHeap: public VM_GC_Operation {
public:
  VM_ShenandoahVerifyHeap(unsigned int gc_count_before,
                   unsigned int full_gc_count_before,
                   GCCause::Cause cause)
    : VM_GC_Operation(gc_count_before, cause, full_gc_count_before) { }
  // NOTE(review): reuses the G1 full-collection op type; a dedicated
  // Shenandoah VMOp_Type value would presumably be more appropriate — confirm.
  virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
  virtual void doit() {
    if (ShenandoahGCVerbose)
      tty->print_cr("verifying heap");
     Universe::heap()->ensure_parsability(false);
     Universe::verify();
  }
  virtual const char* name() const {
    return "Shenandoah verify trigger";
  }
};
 557 
 558 class FindEmptyRegionClosure: public ShenandoahHeapRegionClosure {
 559   ShenandoahHeapRegion* _result;
 560   size_t _required_size;
 561 public:
 562 
 563   FindEmptyRegionClosure(size_t required_size) : _required_size(required_size) {
 564     _result = NULL;
 565   }
 566 
 567   bool doHeapRegion(ShenandoahHeapRegion* r) {
 568     if ((! r->is_in_collection_set()) && r->free() >= _required_size) {
 569       _result = r;
 570       return true;
 571     }
 572     return false;
 573   }
 574   ShenandoahHeapRegion* result() { return _result;}
 575 
 576 };
 577 
// Central allocation entry point: try to allocate under the heap lock;
// on failure (and only for non-evacuating threads) trigger a GC for
// allocation failure and retry once.
HeapWord* ShenandoahHeap::allocate_memory(size_t word_size) {
  HeapWord* result = NULL;
  result = allocate_memory_with_lock(word_size);

  if (result == NULL && ! Thread::current()->is_evacuating()) { // Allocation failed, try full-GC, then retry allocation.
    // tty->print_cr("failed to allocate "SIZE_FORMAT " bytes, free regions:", word_size * HeapWordSize);
    // _free_regions->print();
    collect(GCCause::_allocation_failure);
    result = allocate_memory_with_lock(word_size);
  }

  return result;
}
 591 
// Allocation is serialized on the Shenandoah-specific heap lock.
HeapWord* ShenandoahHeap::allocate_memory_with_lock(size_t word_size) {
  return allocate_memory_shenandoah_lock(word_size);
}

// Former Heap_lock-based path; deliberately unreachable (kept for reference).
HeapWord* ShenandoahHeap::allocate_memory_heap_lock(size_t word_size) {
  ShouldNotReachHere();
  MutexLocker ml(Heap_lock);
  return allocate_memory_work(word_size);
}

// Take ShenandoahHeap_lock (without safepoint check) and allocate.
HeapWord* ShenandoahHeap::allocate_memory_shenandoah_lock(size_t word_size) {
  MutexLockerEx ml(ShenandoahHeap_lock, true);
  return allocate_memory_work(word_size);
}
 606 
 607 ShenandoahHeapRegion* ShenandoahHeap::check_skip_humongous(ShenandoahHeapRegion* region) const {
 608   while (region != NULL && region->is_humongous()) {
 609     region = _free_regions->get_next();
 610   }
 611   return region;
 612 }
 613 
 614 ShenandoahHeapRegion* ShenandoahHeap::get_next_region_skip_humongous() const {
 615   ShenandoahHeapRegion* next = _free_regions->get_next();
 616   return check_skip_humongous(next);
 617 }
 618 
 619 ShenandoahHeapRegion* ShenandoahHeap::get_current_region_skip_humongous() const {
 620   ShenandoahHeapRegion* current = _free_regions->current();
 621   return check_skip_humongous(current);
 622 }
 623 
 624 
 625 ShenandoahHeapRegion* ShenandoahHeap::check_grow_heap(ShenandoahHeapRegion* current) {
 626   if (current == NULL) {
 627     if (grow_heap_by()) {
 628       current = _free_regions->get_next();
 629       assert(current != NULL, "After successfully growing the heap we should have a region");
 630       assert(! current->is_humongous(), "new region must not be humongous");
 631     } else {
 632       current = NULL; // No more room to make a new region. OOM.
 633     }
 634   }
 635   return current;
 636 }
 637 
 638 ShenandoahHeapRegion* ShenandoahHeap::get_current_region() {
 639   ShenandoahHeapRegion* current = get_current_region_skip_humongous();
 640   return check_grow_heap(current);
 641 }
 642 
 643 ShenandoahHeapRegion* ShenandoahHeap::get_next_region() {
 644   ShenandoahHeapRegion* current = get_next_region_skip_humongous();
 645   return check_grow_heap(current);
 646 }
 647 
 648 
// Allocate word_size words from the region free list. Requests larger than
// one region are routed to allocate_large_memory(); regular requests try
// the current region first and then advance through further regions until
// one satisfies the allocation or the heap cannot grow (OOM).
HeapWord* ShenandoahHeap::allocate_memory_work(size_t word_size) {

  if (word_size * HeapWordSize > ShenandoahHeapRegion::RegionSizeBytes) {
    assert(! Thread::current()->is_evacuating(), "no humongous allocation for evacuating thread");
    return allocate_large_memory(word_size);
  }

  ShenandoahHeapRegion* my_current_region = get_current_region();
  if (my_current_region == NULL) {
    return NULL; // No more room to make a new region. OOM.
  }
  assert(my_current_region != NULL, "should have a region at this point");

#ifdef ASSERT
  if (my_current_region->is_in_collection_set()) {
    print_heap_regions();
  }
#endif
  assert(! my_current_region->is_in_collection_set(), "never get targetted regions in free-lists");
  assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");

  HeapWord* result;

  result = my_current_region->par_allocate(word_size);
  while (result == NULL && my_current_region != NULL) {
    // 2nd attempt. Try next region.
    // The tail of the abandoned region is wasted; account it as no
    // longer available.
    size_t remaining = my_current_region->free();
    my_current_region = get_next_region();
    if (my_current_region == NULL) {
      return NULL; // No more room to make a new region. OOM.
    }
    _free_regions->decrease_available(remaining);
    assert(my_current_region != NULL, "should have a region at this point");
    assert(! my_current_region->is_in_collection_set(), "never get targetted regions in free-lists");
    assert(! my_current_region->is_humongous(), "never attempt to allocate from humongous object regions");
    result = my_current_region->par_allocate(word_size);
  }

  if (result != NULL) {
    // Newly allocated objects count as live; update region and global accounting.
    my_current_region->increase_live_data(word_size * HeapWordSize);
    increase_used(word_size * HeapWordSize);
    _free_regions->decrease_available(word_size * HeapWordSize);
  }
  return result;
}
 694 
 695 HeapWord* ShenandoahHeap::allocate_large_memory(size_t words) {
 696   if (ShenandoahTraceHumongous) {
 697     gclog_or_tty->print_cr("allocating humongous object of size: "SIZE_FORMAT" KB", (words * HeapWordSize) / K);
 698   }
 699 
 700   uint required_regions = ShenandoahHumongous::required_regions(words * HeapWordSize);
 701 
 702   assert(required_regions <= _max_regions, "sanity check");
 703 
 704   HeapWord* result;
 705   ShenandoahHeapRegion* free_regions[required_regions];
 706 
 707   bool success = find_contiguous_free_regions(required_regions, free_regions);
 708   if (! success) {
 709     success = allocate_contiguous_free_regions(required_regions, free_regions);
 710   }
 711   if (! success) {
 712     result = NULL; // Throw OOM, we cannot allocate the huge object.
 713   } else {
 714     // Initialize huge object flags in the regions.
 715     size_t live = words * HeapWordSize;
 716     free_regions[0]->set_humongous_start(true);
 717     free_regions[0]->increase_live_data(live);
 718 
 719     for (uint i = 0; i < required_regions; i++) {
 720       if (i == 0) {
 721         free_regions[0]->set_humongous_start(true);
 722       } else {
 723         free_regions[i]->set_humongous_continuation(true);
 724       }
 725       free_regions[i]->set_top(free_regions[i]->end());
 726       increase_used(ShenandoahHeapRegion::RegionSizeBytes);
 727     }
 728     _free_regions->decrease_available(ShenandoahHeapRegion::RegionSizeBytes * required_regions);
 729     result = free_regions[0]->bottom();
 730   }
 731   return result;
 732 }
 733 
// Scan the ordered region array for num_free_regions adjacent, completely
// empty regions. On success fills free_regions[] and returns true.
bool ShenandoahHeap::find_contiguous_free_regions(uint num_free_regions, ShenandoahHeapRegion** free_regions) {
  if (ShenandoahTraceHumongous) {
    gclog_or_tty->print_cr("trying to find "UINT32_FORMAT" contiguous free regions", num_free_regions);
  }
  uint free_regions_index = 0;
  for (uint regions_index = 0; regions_index < _num_regions; regions_index++) {
    // Claim a free region.
    ShenandoahHeapRegion* region = _ordered_regions[regions_index];
    bool free = false;
    if (region != NULL) {
      // Only a completely empty region is usable for a humongous object.
      if (region->free() == ShenandoahHeapRegion::RegionSizeBytes) {
        assert(! region->is_humongous(), "don't reuse occupied humongous regions");
        free = true;
      }
    }
    if (! free) {
      // Not contiguous, reset search
      free_regions_index = 0;
      continue;
    }
    assert(free_regions_index < num_free_regions, "array bounds");
    free_regions[free_regions_index] = region;
    free_regions_index++;

    if (free_regions_index == num_free_regions) {
      // Found a full run of contiguous empty regions.
      if (ShenandoahTraceHumongous) {
        gclog_or_tty->print_cr("found "UINT32_FORMAT" contiguous free regions:", num_free_regions);
        for (uint i = 0; i < num_free_regions; i++) {
          gclog_or_tty->print(UINT32_FORMAT": " , i);
          free_regions[i]->print_on(gclog_or_tty);
        }
      }
      return true;
    }

  }
  if (ShenandoahTraceHumongous) {
    gclog_or_tty->print_cr("failed to find "UINT32_FORMAT" free regions", num_free_regions);
  }
  return false;
}
 775 
// Grow the heap by num_free_regions fresh, contiguous regions and return
// them via free_regions[]. Returns false when the heap cannot grow.
bool ShenandoahHeap::allocate_contiguous_free_regions(uint num_free_regions, ShenandoahHeapRegion** free_regions) {
  // We need to be smart here to avoid interleaved allocation of regions when concurrently
  // allocating for large objects. We get the new index into regions array using CAS, where can
  // subsequently safely allocate new regions.
  int new_regions_index = ensure_new_regions(num_free_regions);
  if (new_regions_index == -1) {
    return false;
  }

  int last_new_region = new_regions_index + num_free_regions;

  // Now we can allocate new regions at the found index without being scared that
  // other threads allocate in the same contiguous region.
  if (ShenandoahGCVerbose) {
    tty->print_cr("allocate contiguous regions:");
  }
  for (int i = new_regions_index; i < last_new_region; i++) {
    ShenandoahHeapRegion* region = new ShenandoahHeapRegion();
    // Region i starts i region-sizes above the bottom of the first region.
    HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * i;
    region->initialize_heap_region(start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, i);
    _ordered_regions[i] = region;
    uint index = i - new_regions_index;
    assert(index < num_free_regions, "array bounds");
    free_regions[index] = region;

    if (ShenandoahGCVerbose) {
      region->print();
    }
  }
  return true;
}
 807 
 808 HeapWord* ShenandoahHeap::mem_allocate_locked(size_t size,
 809                                               bool* gc_overhead_limit_was_exceeded) {
 810 
 811   // This was used for allocation while holding the Heap_lock.
 812   // HeapWord* filler = allocate_memory(BrooksPointer::BROOKS_POINTER_OBJ_SIZE + size);
 813 
 814   HeapWord* filler = allocate_memory(BrooksPointer::BROOKS_POINTER_OBJ_SIZE + size);
 815   HeapWord* result = filler + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
 816   if (filler != NULL) {
 817     initialize_brooks_ptr(filler, result);
 818     _bytesAllocSinceCM += size * HeapWordSize;
 819 #ifdef ASSERT
 820     if (ShenandoahTraceAllocations) {
 821       if (*gc_overhead_limit_was_exceeded)
 822         tty->print("gc_overhead_limit_was_exceeded");
 823       tty->print_cr("mem_allocate_locked object of size "SIZE_FORMAT" uat addr "PTR_FORMAT, size, p2i(result));
 824     }
 825 #endif
 826 
 827     assert(! heap_region_containing(result)->is_in_collection_set(), "never allocate in targetted region");
 828     if (_concurrent_mark_in_progress ||
 829         (shenandoahPolicy()->update_refs_early() && _evacuation_in_progress)) {
 830       mark_current_no_checks(oop(result));
 831     }
 832 
 833     return result;
 834   } else {
 835     tty->print_cr("Out of memory. Requested number of words: "SIZE_FORMAT" used heap: "INT64_FORMAT", bytes allocated since last CM: "INT64_FORMAT, size, used(), _bytesAllocSinceCM);
 836     {
 837       MutexLockerEx ml(ShenandoahHeap_lock, true);
 838       print_heap_regions();
 839       tty->print("Printing "SIZE_FORMAT" free regions:\n", _free_regions->length());
 840       _free_regions->print();
 841     }
 842     assert(false, "Out of memory");
 843     return NULL;
 844   }
 845 }
 846 
 847 class PrintOopContents: public OopClosure {
 848 public:
 849   void do_oop(oop* o) {
 850     oop obj = *o;
 851     tty->print_cr("References oop "PTR_FORMAT, p2i((HeapWord*) obj));
 852     obj->print();
 853   }
 854 
 855   void do_oop(narrowOop* o) {
 856     assert(false, "narrowOops aren't implemented");
 857   }
 858 };
 859 
 860 HeapWord*  ShenandoahHeap::mem_allocate(size_t size,
 861                                         bool*  gc_overhead_limit_was_exceeded) {
 862 
 863 #ifdef ASSERT
 864   if (ShenandoahVerify && _numAllocs > 1000000) {
 865     _numAllocs = 0;
 866   //   VM_ShenandoahVerifyHeap op(0, 0, GCCause::_allocation_failure);
 867   //   if (Thread::current()->is_VM_thread()) {
 868   //     op.doit();
 869   //   } else {
 870   //     // ...and get the VM thread to execute it.
 871   //     VMThread::execute(&op);
 872   //   }
 873   }
 874   _numAllocs++;
 875 #endif
 876 
 877   // MutexLockerEx ml(ShenandoahHeap_lock, true);
 878   HeapWord* result = mem_allocate_locked(size, gc_overhead_limit_was_exceeded);
 879   return result;
 880 }
 881 
 882 class ParallelEvacuateRegionObjectClosure : public ObjectClosure {
 883 private:
 884   ShenandoahHeap* _heap;
 885   Thread* _thread;
 886   public:
 887   ParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 888     _heap(heap), _thread(Thread::current()) {
 889   }
 890 
 891   void do_object(oop p) {
 892 
 893 #ifdef ASSERT
 894     if (ShenandoahTraceEvacuations) {
 895       tty->print_cr("Calling ParallelEvacuateRegionObjectClosure on "PTR_FORMAT, p2i((HeapWord*) p));
 896     }
 897 #endif
 898 
 899     if (_heap->is_marked_current(p) && p == ShenandoahBarrierSet::resolve_oop_static_not_null(p)) {
 900       _heap->evacuate_object(p, _thread);
 901     }
 902   }
 903 };
 904 
// Installs the Brooks forwarding pointer for the object at 'obj', pointing
// it back at itself (i.e. the object has not been evacuated).
// NOTE(review): the 'filler' and 'new_obj' parameters are unused here — the
// forwarding-pointer location is derived from 'obj' alone. Candidate for
// cleanup (this was marked "fixme").
void ShenandoahHeap::initialize_brooks_ptr(HeapWord* filler, HeapWord* obj, bool new_obj) {
  BrooksPointer brooks_ptr = BrooksPointer::get(oop(obj));
  brooks_ptr.set_forwardee(oop(obj));
}
 910 
 911 void ShenandoahHeap::initialize_brooks_ptr(oop p) {
 912   BrooksPointer brooks_ptr = BrooksPointer::get(p);
 913   brooks_ptr.set_forwardee(p);
 914 }
 915 
// Verification closure run over an evacuated region: every live object must
// have been forwarded to a distinct to-space copy with the same class and
// size, and the copy itself must not be forwarded again.
class VerifyEvacuatedObjectClosure : public ObjectClosure {

public:

  void do_object(oop p) {
    if (ShenandoahHeap::heap()->is_marked_current(p)) {
      // Resolve the forwarding pointer; after evacuation it must lead away
      // from the original.
      oop p_prime = oopDesc::bs()->resolve_oop(p);
      assert(p != p_prime, "Should point to evacuated copy");
#ifdef ASSERT
      // Print both klasses before the assert below fires, to aid debugging.
      if (p->klass() != p_prime->klass()) {
        tty->print_cr("copy has different class than original:");
        p->klass()->print_on(tty);
        p_prime->klass()->print_on(tty);
      }
#endif
      assert(p->klass() == p_prime->klass(), err_msg("Should have the same class p: "PTR_FORMAT", p_prime: "PTR_FORMAT, p2i((HeapWord*) p), p2i((HeapWord*) p_prime)));
      //      assert(p->mark() == p_prime->mark(), "Should have the same mark");
      assert(p->size() == p_prime->size(), "Should be the same size");
      // Forwarding must be exactly one hop: the copy resolves to itself.
      assert(p_prime == oopDesc::bs()->resolve_oop(p_prime), "One forward once");
    }
  }
};
 938 
 939 void ShenandoahHeap::verify_evacuated_region(ShenandoahHeapRegion* from_region) {
 940   if (ShenandoahGCVerbose) {
 941     tty->print("Verifying From Region\n");
 942     from_region->print();
 943   }
 944 
 945   VerifyEvacuatedObjectClosure verify_evacuation;
 946   from_region->object_iterate_interruptible(&verify_evacuation, false);
 947 }
 948 
// Evacuates all live objects of 'from_region' (one worker thread per call),
// then optionally verifies the result.
void ShenandoahHeap::parallel_evacuate_region(ShenandoahHeapRegion* from_region) {

  assert(from_region->getLiveData() > 0, "all-garbage regions are reclaimed earlier");

  ParallelEvacuateRegionObjectClosure evacuate_region(this);

#ifdef ASSERT
  if (ShenandoahGCVerbose) {
    tty->print_cr("parallel_evacuate_region starting from_region "INT32_FORMAT": free_regions = "SIZE_FORMAT,  from_region->region_number(), _free_regions->available_regions());
  }
#endif

  // Walk only the marked objects of the region, copying each live one.
  marked_object_iterate(from_region, &evacuate_region);

#ifdef ASSERT
  // Skip verification if the concurrent cycle was cancelled mid-evacuation:
  // the region may then legitimately contain unforwarded live objects.
  if (ShenandoahVerify && ! cancelled_concgc()) {
    verify_evacuated_region(from_region);
  }
  if (ShenandoahGCVerbose) {
    tty->print_cr("parallel_evacuate_region after from_region = "INT32_FORMAT": free_regions = "SIZE_FORMAT, from_region->region_number(), _free_regions->available_regions());
  }
#endif
}
 972 
 973 class ParallelEvacuationTask : public AbstractGangTask {
 974 private:
 975   ShenandoahHeap* _sh;
 976   ShenandoahHeapRegionSet* _cs;
 977 
 978 public:
 979   ParallelEvacuationTask(ShenandoahHeap* sh,
 980                          ShenandoahHeapRegionSet* cs) :
 981     AbstractGangTask("Parallel Evacuation Task"),
 982     _cs(cs),
 983     _sh(sh) {}
 984 
 985   void work(uint worker_id) {
 986 
 987     ShenandoahHeapRegion* from_hr = _cs->claim_next();
 988 
 989     while (from_hr != NULL) {
 990       if (ShenandoahGCVerbose) {
 991         tty->print_cr("Thread "INT32_FORMAT" claimed Heap Region "INT32_FORMAT,
 992                    worker_id,
 993                    from_hr->region_number());
 994         from_hr->print();
 995       }
 996 
 997       assert(from_hr->getLiveData() > 0, "all-garbage regions are reclaimed early");
 998       _sh->parallel_evacuate_region(from_hr);
 999 
1000       if (_sh->cancelled_concgc()) {
1001         if (ShenandoahTracePhases) {
1002           tty->print_cr("Cancelled concurrent evacuation");
1003         }
1004         break;
1005       }
1006       from_hr = _cs->claim_next();
1007     }
1008 
1009     Thread::current()->gclab().make_parsable(true);
1010   }
1011 };
1012 
1013 class RecycleDirtyRegionsClosure: public ShenandoahHeapRegionClosure {
1014 private:
1015   ShenandoahHeap* _heap;
1016   size_t _bytes_reclaimed;
1017 public:
1018   RecycleDirtyRegionsClosure() : _heap(ShenandoahHeap::heap()) {}
1019 
1020   bool doHeapRegion(ShenandoahHeapRegion* r) {
1021 
1022     // If evacuation has been cancelled, we can't recycle regions, we only
1023     // clear their collection-set status.
1024     if (_heap->cancelled_concgc()) {
1025       r->set_is_in_collection_set(false);
1026       return false;
1027     }
1028 
1029     if (r->is_in_collection_set()) {
1030       //      tty->print_cr("recycling region "INT32_FORMAT":", r->region_number());
1031       //      r->print_on(tty);
1032       //      tty->print_cr(" ");
1033       _heap->decrease_used(r->used());
1034       _bytes_reclaimed += r->used();
1035       r->recycle();
1036       _heap->free_regions()->append(r);
1037     }
1038 
1039     return false;
1040   }
1041   size_t bytes_reclaimed() { return _bytes_reclaimed;}
1042   void clear_bytes_reclaimed() {_bytes_reclaimed = 0;}
1043 };
1044 
1045 void ShenandoahHeap::recycle_dirty_regions() {
1046   RecycleDirtyRegionsClosure cl;
1047   cl.clear_bytes_reclaimed();
1048 
1049   heap_region_iterate(&cl);
1050 
1051   _shenandoah_policy->record_bytes_reclaimed(cl.bytes_reclaimed());
1052   clear_cset_fast_test();
1053 }
1054 
// Accessor: the set of regions currently available for allocation.
ShenandoahHeapRegionSet* ShenandoahHeap::free_regions() {
  return _free_regions;
}
1058 
1059 void ShenandoahHeap::print_heap_regions(outputStream* st) const {
1060   PrintHeapRegionsClosure pc1(st);
1061   heap_region_iterate(&pc1);
1062 }
1063 
// Debug closure: prints every reference slot it visits, including whether
// the referent is in the heap, marked, and what its class is. '_index'
// counts visited slots; '_prefix' tags the output lines.
class PrintAllRefsOopClosure: public ExtendedOopClosure {
private:
  int _index;
  const char* _prefix;

public:
  PrintAllRefsOopClosure(const char* prefix) : _index(0), _prefix(prefix) {}

  void do_oop(oop* p)       {
    oop o = *p;
    if (o != NULL) {
      if (ShenandoahHeap::heap()->is_in(o) && o->is_oop()) {
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT")-> "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT")", _prefix, _index, p2i(p), p2i((HeapWord*) o), BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(o)), o->klass()->internal_name(), p2i(o->klass()));
      } else {
        // Referent is outside the heap or fails the oop sanity check.
        //        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT" dirty: %s) -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty (%s))", _prefix, _index, p2i(p), BOOL_TO_STR(ShenandoahHeap::heap()->heap_region_containing(p)->is_in_collection_set()), p2i((HeapWord*) o), BOOL_TO_STR(ShenandoahHeap::heap()->heap_region_containing(o)->is_in_collection_set()));
        tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT" dirty -> "PTR_FORMAT" (not in heap, possibly corrupted or dirty)", _prefix, _index, p2i(p), p2i((HeapWord*) o));
      }
    } else {
      // NULL reference slot.
      tty->print_cr("%s "INT32_FORMAT" ("PTR_FORMAT") -> "PTR_FORMAT, _prefix, _index, p2i(p), p2i((HeapWord*) o));
    }
    _index++;
  }

  void do_oop(narrowOop* p) {
    Unimplemented();
  }

};
1092 
1093 class PrintAllRefsObjectClosure : public ObjectClosure {
1094   const char* _prefix;
1095 
1096 public:
1097   PrintAllRefsObjectClosure(const char* prefix) : _prefix(prefix) {}
1098 
1099   void do_object(oop p) {
1100     if (ShenandoahHeap::heap()->is_in(p)) {
1101         tty->print_cr("%s object "PTR_FORMAT" (marked: %s) (%s "PTR_FORMAT") refers to:", _prefix, p2i((HeapWord*) p), BOOL_TO_STR(ShenandoahHeap::heap()->is_marked_current(p)), p->klass()->internal_name(), p2i(p->klass()));
1102         PrintAllRefsOopClosure cl(_prefix);
1103         p->oop_iterate(&cl);
1104       }
1105   }
1106 };
1107 
1108 void ShenandoahHeap::print_all_refs(const char* prefix) {
1109   tty->print_cr("printing all references in the heap");
1110   tty->print_cr("root references:");
1111 
1112   ensure_parsability(false);
1113 
1114   PrintAllRefsOopClosure cl(prefix);
1115   roots_iterate(&cl);
1116 
1117   tty->print_cr("heap references:");
1118   PrintAllRefsObjectClosure cl2(prefix);
1119   object_iterate(&cl2);
1120 }
1121 
// Post-marking verification closure: every reference reachable from a
// marked object must itself be marked, must not be forwarded, and must not
// point into a collection-set region. On failure it dumps extensive
// diagnostics before asserting.
class VerifyAfterMarkingOopClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap*  _heap;

public:
  VerifyAfterMarkingOopClosure() :
    _heap(ShenandoahHeap::heap()) { }

  void do_oop(oop* p)       {
    oop o = *p;
    if (o != NULL) {
      if (! _heap->is_marked_current(o)) {
        // Marking invariant violated: dump the heap, the offending object,
        // its referrer, and the relevant regions before the assert below.
        _heap->print_heap_regions();
        _heap->print_all_refs("post-mark");
        tty->print_cr("oop not marked, although referrer is marked: "PTR_FORMAT": in_heap: %s, is_marked: %s",
                      p2i((HeapWord*) o), BOOL_TO_STR(_heap->is_in(o)), BOOL_TO_STR(_heap->is_marked_current(o)));
        _heap->print_heap_locations((HeapWord*) o, (HeapWord*) o + o->size());

        tty->print_cr("oop class: %s", o->klass()->internal_name());
        if (_heap->is_in(p)) {
          // Locate and print the object containing the bad reference slot.
          oop referrer = oop(_heap->heap_region_containing(p)->block_start_const(p));
          tty->print_cr("Referrer starts at addr "PTR_FORMAT, p2i((HeapWord*) referrer));
          referrer->print();
          _heap->print_heap_locations((HeapWord*) referrer, (HeapWord*) referrer + referrer->size());
        }
        tty->print_cr("heap region containing object:");
        _heap->heap_region_containing(o)->print();
        tty->print_cr("heap region containing referrer:");
        _heap->heap_region_containing(p)->print();
        tty->print_cr("heap region containing forwardee:");
        _heap->heap_region_containing(oopDesc::bs()->resolve_oop(o))->print();
      }
      assert(o->is_oop(), "oop must be an oop");
      assert(Metaspace::contains(o->klass()), "klass pointer must go to metaspace");
      if (! (o == oopDesc::bs()->resolve_oop(o))) {
        // Diagnostic print before the no-forwarding assert below fires.
        tty->print_cr("oops has forwardee: p: "PTR_FORMAT" (%s), o = "PTR_FORMAT" (%s), new-o: "PTR_FORMAT" (%s)", p2i(p), BOOL_TO_STR(_heap->heap_region_containing(p)->is_in_collection_set()), p2i((HeapWord*) o),  BOOL_TO_STR(_heap->heap_region_containing(o)->is_in_collection_set()), p2i((HeapWord*) oopDesc::bs()->resolve_oop(o)), BOOL_TO_STR(_heap->heap_region_containing(oopDesc::bs()->resolve_oop(o))->is_in_collection_set()));
        tty->print_cr("oop class: %s", o->klass()->internal_name());
      }
      assert(o == oopDesc::bs()->resolve_oop(o), "oops must not be forwarded");
      assert(! _heap->heap_region_containing(o)->is_in_collection_set(), "references must not point to dirty heap regions");
      assert(_heap->is_marked_current(o), "live oops must be marked current");
    }
  }

  void do_oop(narrowOop* p) {
    Unimplemented();
  }

};
1171 
1172 class IterateMarkedCurrentObjectsClosure: public ObjectClosure {
1173 private:
1174   ShenandoahHeap* _heap;
1175   ExtendedOopClosure* _cl;
1176 public:
1177   IterateMarkedCurrentObjectsClosure(ExtendedOopClosure* cl) :
1178     _heap(ShenandoahHeap::heap()), _cl(cl) {};
1179 
1180   void do_object(oop p) {
1181     if (_heap->is_marked_current(p)) {
1182       p->oop_iterate(_cl);
1183     }
1184   }
1185 
1186 };
1187 
// Applies the given oop closure to the reference fields of every object
// marked live in the current marking cycle.
// NOTE(review): this class is an exact duplicate of
// IterateMarkedCurrentObjectsClosure above — candidate for consolidation
// once all callers are visible.
class IterateMarkedObjectsClosure: public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  ExtendedOopClosure* _cl;
public:
  IterateMarkedObjectsClosure(ExtendedOopClosure* cl) :
    _heap(ShenandoahHeap::heap()), _cl(cl) {};

  void do_object(oop p) {
    if (_heap->is_marked_current(p)) {
      p->oop_iterate(_cl);
    }
  }

};
1203 
1204 void ShenandoahHeap::verify_heap_after_marking() {
1205 
1206   verify_heap_size_consistency();
1207 
1208   if (ShenandoahGCVerbose) {
1209     tty->print("verifying heap after marking\n");
1210   }
1211   ensure_parsability(false);
1212   VerifyAfterMarkingOopClosure cl;
1213   roots_iterate(&cl);
1214 
1215   IterateMarkedCurrentObjectsClosure marked_oops(&cl);
1216   object_iterate(&marked_oops);
1217 }
1218 
1219 void ShenandoahHeap::prepare_for_concurrent_evacuation() {
1220   if (!cancelled_concgc()) {
1221 
1222     recycle_dirty_regions();
1223 
1224       ensure_parsability(true);
1225 
1226       // NOTE: This needs to be done during a stop the world pause, because
1227       // putting regions into the collection set concurrently with Java threads
1228       // will create a race. In particular, acmp could fail because when we
1229       // resolve the first operand, the containing region might not yet be in
1230       // the collection set, and thus return the original oop. When the 2nd
1231       // operand gets resolved, the region could be in the collection set
1232       // and the oop gets evacuated. If both operands have originally been
1233       // the same, we get false negatives.
1234       ShenandoahHeapRegionSet regions = ShenandoahHeapRegionSet(_num_regions, _ordered_regions, _num_regions);
1235       regions.reclaim_humongous_regions();
1236       _collection_set->clear();
1237       _free_regions->clear();
1238       _shenandoah_policy->choose_collection_and_free_sets(&regions, _collection_set, _free_regions);
1239 
1240       if (PrintGCTimeStamps) {
1241         gclog_or_tty->print("Collection set used = " SIZE_FORMAT " K live = " SIZE_FORMAT " K reclaimable = " SIZE_FORMAT " K\n",
1242                             _collection_set->used() / K, _collection_set->live_data() / K, _collection_set->garbage() / K);
1243       }
1244 
1245       if (_collection_set->length() == 0)
1246         cancel_concgc();
1247 
1248       _bytesAllocSinceCM = 0;
1249 
1250       Universe::update_heap_info_at_gc();
1251     }
1252 }
1253 
1254 
1255 class ShenandoahUpdateRootsClosure: public ExtendedOopClosure {
1256 
1257   void do_oop(oop* p)       {
1258     ShenandoahHeap::heap()->maybe_update_oop_ref(p);
1259   }
1260 
1261   void do_oop(narrowOop* p) {
1262     Unimplemented();
1263   }
1264 };
1265 
// Updates all GC root slots (strong roots, CLDs, code blobs, and JNI weak
// handles) to point at to-space copies. Must run at a safepoint.
void ShenandoahHeap::update_roots() {

  // Derived pointers must be cleared before and recomputed after the root
  // slots are rewritten (C2 only).
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");

  ShenandoahUpdateRootsClosure cl;
  CodeBlobToOopClosure blobsCl(&cl, false);
  CLDToOopClosure cldCl(&cl);

  ClassLoaderDataGraph::clear_claimed_marks();

  {
    // Single-threaded root processing (1 worker).
    ShenandoahRootProcessor rp(this, 1);
    rp.process_all_roots(&cl, &cldCl, &blobsCl);
    ShenandoahIsAliveClosure is_alive;
    JNIHandles::weak_oops_do(&is_alive, &cl);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}
1287 
1288 class ShenandoahUpdateObjectsClosure : public ObjectClosure {
1289   ShenandoahHeap* _heap;
1290 
1291 public:
1292   ShenandoahUpdateObjectsClosure() :
1293     _heap(ShenandoahHeap::heap()) {
1294   }
1295 
1296   void do_object(oop p) {
1297     ShenandoahUpdateRootsClosure refs_cl;
1298     assert(ShenandoahHeap::heap()->is_in(p), "only update objects in heap (where else?)");
1299 
1300     if (_heap->is_marked_current(p)) {
1301       p->oop_iterate(&refs_cl);
1302     }
1303   }
1304 
1305 };
1306 
1307 class ParallelUpdateRefsTask : public AbstractGangTask {
1308 private:
1309   ShenandoahHeapRegionSet* _regions;
1310 
1311 public:
1312   ParallelUpdateRefsTask(ShenandoahHeapRegionSet* regions) :
1313     AbstractGangTask("Parallel Update References Task"),
1314   _regions(regions) {
1315   }
1316 
1317   void work(uint worker_id) {
1318     ShenandoahUpdateObjectsClosure update_refs_cl;
1319     ShenandoahHeapRegion* region = _regions->claim_next();
1320     ShenandoahHeap* heap = ShenandoahHeap::heap();
1321     while (region != NULL && ! heap->cancelled_concgc()) {
1322       if ((! region->is_in_collection_set()) && ! region->is_humongous_continuation()) {
1323         heap->marked_object_iterate_careful(region, &update_refs_cl);
1324       }
1325       heap->reset_mark_bitmap_range(region->bottom(), region->end());
1326       region = _regions->claim_next();
1327     }
1328     if (ShenandoahTracePhases && heap->cancelled_concgc()) {
1329       tty->print_cr("Cancelled concurrent update references");
1330     }
1331   }
1332 };
1333 
1334 class RetireTLABClosure : public ThreadClosure {
1335 private:
1336   bool _retire;
1337 
1338 public:
1339   RetireTLABClosure(bool retire) : _retire(retire) {
1340   }
1341 
1342   void do_thread(Thread* thread) {
1343     thread->gclab().make_parsable(_retire);
1344   }
1345 };
1346 
1347 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1348   if (UseTLAB) {
1349   CollectedHeap::ensure_parsability(retire_tlabs);
1350 
1351   RetireTLABClosure cl(retire_tlabs);
1352   for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
1353     cl.do_thread(thread);
1354   }
1355   gc_threads_do(&cl);
1356   }
1357 }
1358 
1359 void ShenandoahHeap::prepare_for_update_references() {
1360   ensure_parsability(true);
1361 
1362   ShenandoahHeapRegionSet regions = ShenandoahHeapRegionSet(_num_regions, _ordered_regions, _num_regions);
1363   regions.set_concurrent_iteration_safe_limits();
1364 
1365   if (ShenandoahVerifyReadsToFromSpace) {
1366     set_from_region_protection(false);
1367 
1368     // We need to update the roots so that they are ok for C2 when returning from the safepoint.
1369     update_roots();
1370 
1371     set_from_region_protection(true);
1372 
1373   } else {
1374     // We need to update the roots so that they are ok for C2 when returning from the safepoint.
1375     update_roots();
1376   }
1377 
1378   set_update_references_in_progress(true);
1379 }
1380 
// Runs the concurrent update-references phase over all regions, then (if
// not cancelled) updates the root set and allocation statistics.
void ShenandoahHeap::update_references() {

  ShenandoahHeapRegionSet regions = ShenandoahHeapRegionSet(_num_regions, _ordered_regions, _num_regions);
  ParallelUpdateRefsTask task = ParallelUpdateRefsTask(&regions);
  conc_workers()->set_active_workers(_max_conc_workers);
  _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_uprefs);
  conc_workers()->run_task(&task);
  _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_uprefs);
  // NOTE(review): this second set_active_workers sets the same value as the
  // one above and looks redundant — possibly a leftover; confirm intent.
  conc_workers()->set_active_workers(_max_conc_workers);

  if (! cancelled_concgc()) {
    // Update the root slots, either via a VM operation (safepoint) or
    // directly when already running in a stop-the-world context.
    VM_ShenandoahUpdateRootRefs update_roots;
    if (ShenandoahConcurrentUpdateRefs) {
      VMThread::execute(&update_roots);
    } else {
      update_roots.doit();
    }

    // Track the high-water mark of bytes allocated during a GC cycle.
    _allocated_last_gc = used() - _used_start_gc;
    size_t max_allocated_gc = MAX2(_max_allocated_gc, _allocated_last_gc);
    /*
      tty->print_cr("prev max_allocated_gc: "SIZE_FORMAT", new max_allocated_gc: "SIZE_FORMAT", allocated_last_gc: "SIZE_FORMAT" diff %f", _max_allocated_gc, max_allocated_gc, _allocated_last_gc, ((double) max_allocated_gc/ (double) _allocated_last_gc));
    */
    _max_allocated_gc = max_allocated_gc;

    // Update-references completed, no need to update-refs during marking.
    set_need_update_refs(false);
  }

  Universe::update_heap_info_at_gc();

  set_update_references_in_progress(false);
}
1414 
1415 
// Root closure used during evacuation: if the referent is in the collection
// set, evacuates it (unless another thread already did) and rewrites the
// slot to the to-space copy.
class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

  void do_oop(oop* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    oop obj = oopDesc::load_heap_oop(p);
    if (obj != NULL && _heap->in_cset_fast_test((HeapWord*) obj)) {
      assert(_heap->is_marked_current(obj), err_msg("only evacuate marked objects %d %d", _heap->is_marked_current(obj), _heap->is_marked_current(ShenandoahBarrierSet::resolve_oop_static_not_null(obj))));
      // If the object still resolves to itself, no other thread has copied
      // it yet; evacuate it ourselves. Either way, store the to-space copy.
      oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
      if (resolved == obj) {
        resolved = _heap->evacuate_object(obj, _thread);
      }
      oopDesc::store_heap_oop(p, resolved);
    }
#ifdef ASSERT
    else if (! oopDesc::is_null(obj)) {
      // Referent is outside the collection set; nothing to do.
      // tty->print_cr("not updating root at: "PTR_FORMAT" with object: "PTR_FORMAT", is_in_heap: %s, is_in_cset: %s, is_marked: %s", p2i(p), p2i((HeapWord*) obj), BOOL_TO_STR(_heap->is_in(obj)), BOOL_TO_STR(_heap->in_cset_fast_test(obj)), BOOL_TO_STR(_heap->is_marked_current(obj)));
    }
#endif
  }

  void do_oop(narrowOop* p) {
    Unimplemented();
  }
};
1448 
1449 class ShenandoahEvacuateUpdateStrongRootsTask : public AbstractGangTask {
1450   ShenandoahRootProcessor* _rp;
1451 public:
1452 
1453   ShenandoahEvacuateUpdateStrongRootsTask(ShenandoahRootProcessor* rp) :
1454     AbstractGangTask("Shenandoah evacuate and update strong roots"),
1455     _rp(rp)
1456   {
1457     // Nothing else to do.
1458   }
1459 
1460   void work(uint worker_id) {
1461     ShenandoahEvacuateUpdateRootsClosure cl;
1462     CodeBlobToOopClosure blobsCl(&cl, false);
1463     CLDToOopClosure cldCl(&cl);
1464 
1465     _rp->process_all_roots(&cl, &cldCl, &blobsCl);
1466   }
1467 };
1468 
1469 class ShenandoahEvacuateUpdateWeakRootsTask : public AbstractGangTask {
1470 public:
1471 
1472   ShenandoahEvacuateUpdateWeakRootsTask() : AbstractGangTask("Shenandoah evacuate and update weak roots") {
1473     // Nothing else to do.
1474   }
1475 
1476   void work(uint worker_id) {
1477     ShenandoahEvacuateUpdateRootsClosure cl;
1478     ShenandoahIsAliveClosure is_alive;
1479     JNIHandles::weak_oops_do(&is_alive, &cl);
1480 
1481     ShenandoahHeap* heap = ShenandoahHeap::heap();
1482     if (ShenandoahProcessReferences) {
1483       heap->ref_processor()->weak_oops_do(&cl);
1484     }
1485   }
1486 };
1487 
// Evacuates referents of all GC roots and rewrites the root slots to the
// to-space copies. Strong roots are processed in parallel; weak roots with
// a single worker. Must run at a safepoint.
void ShenandoahHeap::evacuate_and_update_roots() {

  COMPILER2_PRESENT(DerivedPointerTable::clear());

  // Drop from-space protection while the roots are touched (verification
  // builds only); restored below.
  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(false);
  }

  assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
  ClassLoaderDataGraph::clear_claimed_marks();

  {
    ShenandoahRootProcessor rp(this, _max_parallel_workers);
    ShenandoahEvacuateUpdateStrongRootsTask strong_roots_task(&rp);
    workers()->set_active_workers(_max_parallel_workers);
    workers()->run_task(&strong_roots_task);
  }

  // We process weak roots using only 1 worker thread, multi-threaded weak roots
  // processing is not implemented yet. We can't use the VMThread itself, because
  // we need to grab the Heap_lock.
  {
    ShenandoahEvacuateUpdateWeakRootsTask weak_roots_task;
    workers()->set_active_workers(1);
    workers()->run_task(&weak_roots_task);
    workers()->set_active_workers(_max_parallel_workers);
  }

  if (ShenandoahVerifyReadsToFromSpace) {
    set_from_region_protection(true);
  }

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

}
1523 
1524 
1525 void ShenandoahHeap::do_evacuation() {
1526   assert(Thread::current()->is_VM_thread() || ShenandoahConcurrentEvacuation, "Only evacuate from VMThread unless we do concurrent evacuation");
1527 
1528   parallel_evacuate();
1529 
1530   if (! ShenandoahConcurrentEvacuation) {
1531     // We need to make sure that after leaving the safepoint, all
1532     // GC roots are up-to-date. This is an assumption built into
1533     // the hotspot compilers, especially C2, that allows it to
1534     // do optimizations like lifting barriers outside of a loop.
1535 
1536     if (ShenandoahVerifyReadsToFromSpace) {
1537       set_from_region_protection(false);
1538 
1539       update_roots();
1540 
1541       set_from_region_protection(true);
1542 
1543     } else {
1544       update_roots();
1545     }
1546   }
1547 
1548   if (ShenandoahVerify && ! cancelled_concgc()) {
1549     VM_ShenandoahVerifyHeapAfterEvacuation verify_after_evacuation;
1550     if (Thread::current()->is_VM_thread()) {
1551       verify_after_evacuation.doit();
1552     } else {
1553       VMThread::execute(&verify_after_evacuation);
1554     }
1555   }
1556 
1557 }
1558 
// Evacuates all collection-set regions using the concurrent worker gang.
// No-op if the concurrent cycle has been cancelled.
void ShenandoahHeap::parallel_evacuate() {

  if (! cancelled_concgc()) {
    assert(Thread::current()->is_VM_thread() || ShenandoahConcurrentEvacuation, "Only evacuate from VMThread unless we do concurrent evacuation");

    if (ShenandoahGCVerbose) {
      tty->print_cr("starting parallel_evacuate");
      //    PrintHeapRegionsClosure pc1;
      //    heap_region_iterate(&pc1);
    }

    _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_evac);

    if (ShenandoahGCVerbose) {
      tty->print("Printing all available regions");
      print_heap_regions();
    }

    if (ShenandoahPrintCollectionSet) {
      tty->print("Printing collection set which contains "SIZE_FORMAT" regions:\n", _collection_set->length());
      _collection_set->print();

      tty->print("Printing free set which contains "SIZE_FORMAT" regions:\n", _free_regions->length());
      _free_regions->print();

      //    if (_collection_set->length() == 0)
      //      print_heap_regions();
    }

    ParallelEvacuationTask evacuationTask = ParallelEvacuationTask(this, _collection_set);

    conc_workers()->set_active_workers(_max_conc_workers);
    conc_workers()->run_task(&evacuationTask);
    //workers()->set_active_workers(_max_parallel_workers);

    if (ShenandoahGCVerbose) {

      tty->print("Printing postgc collection set which contains "SIZE_FORMAT" regions:\n", _collection_set->available_regions());
      _collection_set->print();

      tty->print("Printing postgc free regions which contain "SIZE_FORMAT" free regions:\n", _free_regions->available_regions());
      _free_regions->print();

      tty->print_cr("finished parallel_evacuate");
      // NOTE(review): the regions are printed twice below (also right above
      // via print_heap_regions()); looks like duplicated debug output.
      print_heap_regions();

      tty->print_cr("all regions after evacuation:");
      print_heap_regions();
    }

    _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_evac);
  }
}
1612 
1613 class VerifyEvacuationClosure: public ExtendedOopClosure {
1614 private:
1615   ShenandoahHeap*  _heap;
1616   ShenandoahHeapRegion* _from_region;
1617 
1618 public:
1619   VerifyEvacuationClosure(ShenandoahHeapRegion* from_region) :
1620     _heap(ShenandoahHeap::heap()), _from_region(from_region) { }
1621 
1622   void do_oop(oop* p)       {
1623     oop heap_oop = oopDesc::load_heap_oop(p);
1624     if (! oopDesc::is_null(heap_oop)) {
1625       guarantee(! _from_region->is_in(heap_oop), err_msg("no references to from-region allowed after evacuation: "PTR_FORMAT, p2i((HeapWord*) heap_oop)));
1626     }
1627   }
1628 
1629   void do_oop(narrowOop* p) {
1630     Unimplemented();
1631   }
1632 
1633 };
1634 
1635 void ShenandoahHeap::roots_iterate(ExtendedOopClosure* cl) {
1636 
1637   assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
1638 
1639   CodeBlobToOopClosure blobsCl(cl, false);
1640   CLDToOopClosure cldCl(cl);
1641 
1642   ClassLoaderDataGraph::clear_claimed_marks();
1643 
1644   ShenandoahRootProcessor rp(this, 1);
1645   rp.process_all_roots(cl, &cldCl, &blobsCl);
1646 }
1647 
1648 void ShenandoahHeap::weak_roots_iterate(ExtendedOopClosure* cl) {
1649   if (ShenandoahProcessReferences) {
1650     ref_processor()->weak_oops_do(cl);
1651   }
1652   ShenandoahAlwaysTrueClosure always_true;
1653   JNIHandles::weak_oops_do(&always_true, cl);
1654 }
1655 
1656 void ShenandoahHeap::verify_evacuation(ShenandoahHeapRegion* from_region) {
1657 
1658   VerifyEvacuationClosure rootsCl(from_region);
1659   roots_iterate(&rootsCl);
1660 
1661 }
1662 
// Shenandoah supports thread-local allocation buffers.
bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}
1666 
1667 
1668 size_t  ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1669   ShenandoahHeapRegion* current = get_current_region_skip_humongous();
1670   if (current == NULL)
1671     return 0;
1672   else if (current->free() > MinTLABSize) {
1673     return current->free();
1674   } else {
1675     return MinTLABSize;
1676   }
1677 }
1678 
// A TLAB can be at most one heap region in size.
size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::RegionSizeBytes;
}
1682 
1683 class ResizeGCLABClosure : public ThreadClosure {
1684 public:
1685   void do_thread(Thread* thread) {
1686     thread->gclab().resize();
1687   }
1688 };
1689 
1690 void ShenandoahHeap::resize_all_tlabs() {
1691   CollectedHeap::resize_all_tlabs();
1692 
1693   if (PrintTLAB && Verbose) {
1694     tty->print_cr("Resizing Shenandoah GCLABs...");
1695   }
1696 
1697   ResizeGCLABClosure cl;
1698   for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
1699     cl.do_thread(thread);
1700   }
1701   gc_threads_do(&cl);
1702 
1703   if (PrintTLAB && Verbose) {
1704     tty->print_cr("Done resizing Shenandoah GCLABs...");
1705   }
1706 }
1707 
// Thread closure that folds a thread's GCLAB statistics into the global
// totals and then resets the per-thread counters for the next cycle.
class AccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().accumulate_statistics();
    thread->gclab().initialize_statistics();
  }
};
1715 
1716 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1717 
1718   AccumulateStatisticsGCLABClosure cl;
1719   for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
1720     cl.do_thread(thread);
1721   }
1722   gc_threads_do(&cl);
1723 }
1724 
// No card-mark barriers are needed for stores into freshly TLAB-allocated
// objects in Shenandoah.
bool  ShenandoahHeap::can_elide_tlab_store_barriers() const {
  return true;
}
1728 
// No pre-barrier work is required for new-object stores; return the object
// unchanged.
oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // Overridden to do nothing.
  return new_obj;
}
1733 
// Initializing stores need no barrier in Shenandoah.
bool  ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return true;
}
1737 
// Shenandoah does not use card marking, so stores need not be followed by one.
bool ShenandoahHeap::card_mark_must_follow_store() const {
  return false;
}
1741 
// Heap inspection (jmap -histo style walks) is not supported yet.
bool ShenandoahHeap::supports_heap_inspection() const {
  return false;
}
1745 
// Largest non-humongous allocation, in words: one full heap region.
size_t ShenandoahHeap::unsafe_max_alloc() {
  return ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize;
}
1749 
1750 void ShenandoahHeap::collect(GCCause::Cause cause) {
1751   if (GCCause::is_user_requested_gc(cause)) {
1752     if (! DisableExplicitGC) {
1753       if (ShenandoahTraceFullGC) {
1754         gclog_or_tty->print_cr("Shenandoah-full-gc: requested full GC");
1755       }
1756       cancel_concgc();
1757       _concurrent_gc_thread->do_full_gc(cause);
1758     }
1759   } else if (cause == GCCause::_allocation_failure) {
1760 
1761     if (ShenandoahTraceFullGC) {
1762       gclog_or_tty->print_cr("Shenandoah-full-gc: full GC for allocation failure heap free: "SIZE_FORMAT", available: "SIZE_FORMAT, capacity() - used(), free_regions()->available());
1763     }
1764     cancel_concgc();
1765     collector_policy()->set_should_clear_all_soft_refs(true);
1766       _concurrent_gc_thread->do_full_gc(cause);
1767 
1768   } else if (cause == GCCause::_gc_locker) {
1769 
1770     if (ShenandoahTraceJNICritical) {
1771       gclog_or_tty->print_cr("Resuming deferred evacuation after JNI critical regions");
1772     }
1773 
1774     jni_critical()->notify_jni_critical();
1775   }
1776 }
1777 
// Intentionally a no-op: full collections are driven through the concurrent
// GC thread (see collect()) rather than this CollectedHeap hook.
void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}
1781 
// Adaptive size policy is not implemented for Shenandoah.
AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;

}
1787 
// Accessor for the Shenandoah-specific collector policy.
ShenandoahCollectorPolicy* ShenandoahHeap::collector_policy() const {
  return _shenandoah_policy;
}
1791 
1792 
1793 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1794   Space* sp = space_containing(addr);
1795   if (sp != NULL) {
1796     return sp->block_start(addr);
1797   }
1798   return NULL;
1799 }
1800 
// Size of the block starting at 'addr', delegated to the containing space.
// The address must be inside the heap (asserted).
size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
  Space* sp = space_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}
1806 
// Whether the block at 'addr' is a parseable object, delegated to the
// containing space.
// NOTE(review): unlike block_start(), there is no NULL check on 'sp' here —
// presumably callers only pass in-heap addresses; confirm.
bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  Space* sp = space_containing(addr);
  return sp->block_is_obj(addr);
}
1811 
// Stub: always reports 0 ms since the last GC.
jlong ShenandoahHeap::millis_since_last_gc() {
  return 0;
}
1815 
// Make the heap parseable before verification. Retiring TLABs (without
// re-allocation) is only safe at a safepoint, or any time if TLABs are off.
void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    ensure_parsability(false);
  }
}
1821 
// Print both parallel and concurrent GC worker threads.
void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
  workers()->print_worker_threads_on(st);
  conc_workers()->print_worker_threads_on(st);
}
1826 
// Apply 'tcl' to both parallel and concurrent GC worker threads.
void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
  conc_workers()->threads_do(tcl);
}
1831 
// Print collector policy statistics at VM exit when -XX:+PrintGCDetails.
void ShenandoahHeap::print_tracing_info() const {
  if (PrintGCDetails) {
    _shenandoah_policy->print_tracing_info();
  }
}
1837 
1838 class ShenandoahVerifyRootsClosure: public ExtendedOopClosure {
1839 private:
1840   ShenandoahHeap*  _heap;
1841   VerifyOption     _vo;
1842   bool             _failures;
1843 public:
1844   // _vo == UsePrevMarking -> use "prev" marking information,
1845   // _vo == UseNextMarking -> use "next" marking information,
1846   // _vo == UseMarkWord    -> use mark word from object header.
1847   ShenandoahVerifyRootsClosure(VerifyOption vo) :
1848     _heap(ShenandoahHeap::heap()),
1849     _vo(vo),
1850     _failures(false) { }
1851 
1852   bool failures() { return _failures; }
1853 
1854   void do_oop(oop* p)       {
1855     if (*p != NULL) {
1856       oop heap_oop = oopDesc::load_heap_oop(p);
1857       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
1858       if (!obj->is_oop()) {
1859         { // Just for debugging.
1860           gclog_or_tty->print_cr("Root location "PTR_FORMAT
1861                                  "verified "PTR_FORMAT, p2i(p), p2i((void*) obj));
1862           //      obj->print_on(gclog_or_tty);
1863         }
1864       }
1865       guarantee(obj->is_oop(), "is_oop");
1866     }
1867   }
1868 
1869   void do_oop(narrowOop* p) {
1870     Unimplemented();
1871   }
1872 
1873 };
1874 
// Object closure that verifies each heap object by feeding its own address
// through the roots-verification closure (checks is_oop()).
class ShenandoahVerifyHeapClosure: public ObjectClosure {
private:
  ShenandoahVerifyRootsClosure _rootsCl;
public:
  // Takes the roots closure by value and keeps a private copy.
  ShenandoahVerifyHeapClosure(ShenandoahVerifyRootsClosure rc) :
    _rootsCl(rc) {};

  void do_object(oop p) {
    // Pass the address of the local so the object itself is verified.
    _rootsCl.do_oop(&p);
  }
};
1886 
// Klass closure that applies an oop closure to every oop embedded in a klass.
class ShenandoahVerifyKlassClosure: public KlassClosure {
  OopClosure *_oop_closure;
 public:
  ShenandoahVerifyKlassClosure(OopClosure* cl) : _oop_closure(cl) {}
  void do_klass(Klass* k) {
    k->oops_do(_oop_closure);
  }
};
1895 
// Full heap verification: checks all roots, then every heap object, via
// is_oop() guarantees. Only runs at a safepoint (or with TLABs disabled);
// otherwise the heap is not parseable and verification is skipped.
void ShenandoahHeap::verify(bool silent , VerifyOption vo) {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {

    ShenandoahVerifyRootsClosure rootsCl(vo);

    assert(Thread::current()->is_VM_thread(),
           "Expected to be executed serially by the VM thread at this point");

    roots_iterate(&rootsCl);

    bool failures = rootsCl.failures();
    if (ShenandoahGCVerbose)
      gclog_or_tty->print("verify failures: %s", BOOL_TO_STR(failures));

    // Re-use the roots closure to verify every object in the heap.
    ShenandoahVerifyHeapClosure heapCl(rootsCl);

    object_iterate(&heapCl);
    // TODO: Implement rest of it.
#ifdef ASSERT_DISABLED
    verify_live();
#endif
  } else {
    if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
  }
}
// Total space available for TLAB allocation: the free-region set's capacity.
size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_regions->available();
}
1924 
// Region closure that walks all objects in each region with the given
// object closure (non-interruptible variant: 'false' flag).
class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
  ObjectClosure* _cl;
public:
  ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->object_iterate_interruptible(_cl, false);
    return false; // never abort the region iteration
  }
};
1934 
1935 class ShenandoahIterateUpdateClosure: public ShenandoahHeapRegionClosure {
1936   ObjectClosure* _cl;
1937 public:
1938   ShenandoahIterateUpdateClosure(ObjectClosure *cl) : _cl(cl) {}
1939   bool doHeapRegion(ShenandoahHeapRegion* r) {
1940     if ((! r->is_in_collection_set()) && !r->is_humongous_continuation()) {
1941       r->object_iterate_interruptible(_cl, false);
1942     }
1943     return false;
1944   }
1945 };
1946 
1947 void ShenandoahHeap::cleanup_after_cancelconcgc() {
1948   if (need_update_refs()) {
1949   ShenandoahUpdateObjectsClosure update_refs_cl;
1950   ShenandoahIterateUpdateClosure blk(&update_refs_cl);
1951   heap_region_iterate(&blk, false, false);
1952   }
1953 }
1954 
// Region closure that walks objects "carefully" (tolerating in-progress
// allocation) in each region.
class ShenandoahIterateObjectClosureCarefulRegionClosure: public ShenandoahHeapRegionClosure {
  ObjectClosureCareful* _cl;
public:
  ShenandoahIterateObjectClosureCarefulRegionClosure(ObjectClosureCareful* cl) : _cl(cl) {}
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->object_iterate_careful(_cl);
    return false; // never abort the region iteration
  }
};
1964 
// Apply 'cl' to all objects in all regions (skipping humongous continuations,
// which belong to the object started in the preceding region).
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  ShenandoahIterateObjectClosureRegionClosure blk(cl);
  heap_region_iterate(&blk, false, true);
}
1969 
// Careful variant of object_iterate() for use when the heap may not be
// fully parseable (skips humongous continuations).
void ShenandoahHeap::object_iterate_careful(ObjectClosureCareful* cl) {
  ShenandoahIterateObjectClosureCarefulRegionClosure blk(cl);
  heap_region_iterate(&blk, false, true);
}
1974 
// Not implemented for Shenandoah.
void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
  Unimplemented();
}
1978 
// Iterate all marked objects of a whole region (bottom to top).
void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl) {
  marked_object_iterate(region, cl, region->bottom(), region->top());
}
1982 
// Iterate marked objects only up to the region's concurrent-iteration safe
// limit, so objects being concurrently allocated are not touched.
void ShenandoahHeap::marked_object_iterate_careful(ShenandoahHeapRegion* region, ObjectClosure* cl) {
  marked_object_iterate(region, cl, region->bottom(), region->concurrent_iteration_safe_limit());
}
1986 
1987 void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl,
1988                                            HeapWord* addr, HeapWord* limit) {
1989   addr += BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
1990   HeapWord* last_addr = NULL;
1991   size_t last_size = 0;
1992   while (addr < limit) {
1993     addr = _next_mark_bit_map->getNextMarkedWordAddress(addr, limit);
1994     if (addr < limit) {
1995       oop obj = oop(addr);
1996       assert(is_marked_current(obj), "object expected to be marked");
1997       cl->do_object(obj);
1998       last_addr = addr;
1999       last_size = obj->size();
2000       addr += obj->size() + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
2001     } else {
2002       break;
2003     }
2004   }
2005 }
2006 
2007 class ShenandoahIterateOopClosureRegionClosure : public ShenandoahHeapRegionClosure {
2008   MemRegion _mr;
2009   ExtendedOopClosure* _cl;
2010   bool _skip_unreachable_objects;
2011 public:
2012   ShenandoahIterateOopClosureRegionClosure(ExtendedOopClosure* cl, bool skip_unreachable_objects) :
2013     _cl(cl), _skip_unreachable_objects(skip_unreachable_objects) {}
2014   ShenandoahIterateOopClosureRegionClosure(MemRegion mr, ExtendedOopClosure* cl)
2015     :_mr(mr), _cl(cl) {}
2016   bool doHeapRegion(ShenandoahHeapRegion* r) {
2017     r->oop_iterate_skip_unreachable(_cl, _skip_unreachable_objects);
2018     return false;
2019   }
2020 };
2021 
// Apply 'cl' to all oops in all objects; optionally skip collection-set
// regions and/or unreachable objects. Humongous continuations are always
// skipped (handled via their head region).
void ShenandoahHeap::oop_iterate(ExtendedOopClosure* cl, bool skip_dirty_regions, bool skip_unreachable_objects) {
  ShenandoahIterateOopClosureRegionClosure blk(cl, skip_unreachable_objects);
  heap_region_iterate(&blk, skip_dirty_regions, true);
}
2026 
// MemRegion-bounded oop iteration.
// NOTE(review): the region closure currently ignores 'mr' and walks every
// region — verify whether callers rely on the bound.
void ShenandoahHeap::oop_iterate(MemRegion mr,
                                 ExtendedOopClosure* cl) {
  ShenandoahIterateOopClosureRegionClosure blk(mr, cl);
  heap_region_iterate(&blk, false, true);
}
2032 
// Not implemented for Shenandoah.
void  ShenandoahHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  Unimplemented();
}
2036 
// Adapter: presents each heap region (which is-a Space) to a SpaceClosure.
class SpaceClosureRegionClosure: public ShenandoahHeapRegionClosure {
  SpaceClosure* _cl;
public:
  SpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    _cl->do_space(r);
    return false; // never abort the region iteration
  }
};
2046 
// Apply 'cl' to every heap region, viewed as a Space.
void  ShenandoahHeap::space_iterate(SpaceClosure* cl) {
  SpaceClosureRegionClosure blk(cl);
  heap_region_iterate(&blk);
}
2051 
2052 ShenandoahHeapRegion*
2053 ShenandoahHeap::heap_region_containing(const void* addr) const {
2054   uint index = heap_region_index_containing(addr);
2055   ShenandoahHeapRegion* result = _ordered_regions[index];
2056 #ifdef ASSERT
2057   if (!(addr >= result->bottom() && addr < result->end())) {
2058     tty->print_cr("heap region does not contain address, first_region_bottom: "PTR_FORMAT", real bottom of first region: "PTR_FORMAT", num_regions: "SIZE_FORMAT, p2i(_first_region_bottom), p2i(_ordered_regions[0]->bottom()), _num_regions);
2059   }
2060 #endif
2061   assert(addr >= result->bottom() && addr < result->end(), "address must be in found region");
2062   return result;
2063 }
2064 
2065 Space*  ShenandoahHeap::space_containing(const void* oop) const {
2066   Space* res = heap_region_containing(oop);
2067   return res;
2068 }
2069 
// Not used by Shenandoah.
void  ShenandoahHeap::gc_prologue(bool b) {
  Unimplemented();
}
2073 
// Not used by Shenandoah.
void  ShenandoahHeap::gc_epilogue(bool b) {
  Unimplemented();
}
2077 
2078 // Apply blk->doHeapRegion() on all committed regions in address order,
2079 // terminating the iteration early if doHeapRegion() returns true.
2080 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions, bool skip_humongous_continuation) const {
2081   for (size_t i = 0; i < _num_regions; i++) {
2082     ShenandoahHeapRegion* current  = _ordered_regions[i];
2083     if (skip_humongous_continuation && current->is_humongous_continuation()) {
2084       continue;
2085     }
2086     if (skip_dirty_regions && current->is_in_collection_set()) {
2087       continue;
2088     }
2089     if (blk->doHeapRegion(current)) {
2090       return;
2091     }
2092   }
2093 }
2094 
2095 /**
2096  * Maybe we need that at some point...
2097 oop* ShenandoahHeap::resolve_oop_ptr(oop* p) {
2098   if (is_in(p) && heap_region_containing(p)->is_dirty()) {
2099     // If the reference is in an object in from-space, we need to first
2100     // find its to-space counterpart.
2101     // TODO: This here is slow (linear search inside region). Make it faster.
2102     oop from_space_oop = oop_containing_oop_ptr(p);
2103     HeapWord* to_space_obj = (HeapWord*) oopDesc::bs()->resolve_oop(from_space_oop);
2104     return (oop*) (to_space_obj + ((HeapWord*) p - ((HeapWord*) from_space_oop)));
2105   } else {
2106     return p;
2107   }
2108 }
2109 
2110 oop ShenandoahHeap::oop_containing_oop_ptr(oop* p) {
2111   HeapWord* from_space_ref = (HeapWord*) p;
2112   ShenandoahHeapRegion* region = heap_region_containing(from_space_ref);
2113   HeapWord* from_space_obj = NULL;
2114   for (HeapWord* curr = region->bottom(); curr < from_space_ref; ) {
2115     oop curr_obj = (oop) curr;
2116     if (curr < from_space_ref && from_space_ref < (curr + curr_obj->size())) {
2117       from_space_obj = curr;
2118       break;
2119     } else {
2120       curr += curr_obj->size();
2121     }
2122   }
2123   assert (from_space_obj != NULL, "must not happen");
2124   oop from_space_oop = (oop) from_space_obj;
2125   assert (from_space_oop->is_oop(), "must be oop");
2126   assert(ShenandoahBarrierSet::is_brooks_ptr(oop(((HeapWord*) from_space_oop) - BrooksPointer::BROOKS_POINTER_OBJ_SIZE)), "oop must have a brooks ptr");
2127   return from_space_oop;
2128 }
2129  */
2130 
// Region closure that resets per-region live-data accounting before a new
// marking cycle.
class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
  ShenandoahHeap* sh;
public:
  ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }

  bool doHeapRegion(ShenandoahHeapRegion* r) {
    r->clearLiveData();
    return false; // never abort the region iteration
  }
};
2141 
2142 
// Prepare for and kick off a concurrent marking cycle:
// accumulate TLAB stats, raise the marking flag (activates SATB queues),
// retire TLABs, record allocation stats, clear per-region liveness, and
// finally mark the root objects.
void ShenandoahHeap::start_concurrent_marking() {

  accumulate_statistics_all_tlabs();

  // Raising the flag first activates the SATB barriers for mutators.
  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  if (UseTLAB) {
    ensure_parsability(true);
  }

  _shenandoah_policy->record_bytes_allocated(_bytesAllocSinceCM);
  _used_start_gc = used();

#ifdef ASSERT
  if (ShenandoahDumpHeapBeforeConcurrentMark) {
    ensure_parsability(false);
    print_all_refs("pre-mark");
  }
#endif

  // Reset per-region live data before the new marking round.
  ClearLivenessClosure clc(this);
  heap_region_iterate(&clc);

  // print_all_refs("pre -mark");

  // oopDesc::_debug = true;

  // Mark all objects directly reachable from roots.
  concurrentMark()->prepare_unmarked_root_objs();

  //  print_all_refs("pre-mark2");
}
2174 
2175 
2176 class VerifyLivenessClosure : public ExtendedOopClosure {
2177 
2178   ShenandoahHeap* _sh;
2179 
2180 public:
2181   VerifyLivenessClosure() : _sh ( ShenandoahHeap::heap() ) {}
2182 
2183   template<class T> void do_oop_nv(T* p) {
2184     T heap_oop = oopDesc::load_heap_oop(p);
2185     if (!oopDesc::is_null(heap_oop)) {
2186       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2187       guarantee(_sh->heap_region_containing(obj)->is_in_collection_set() == (obj != oopDesc::bs()->resolve_oop(obj)),
2188                 err_msg("forwarded objects can only exist in dirty (from-space) regions is_dirty: %s, is_forwarded: %s",
2189                         BOOL_TO_STR(_sh->heap_region_containing(obj)->is_in_collection_set()),
2190                         BOOL_TO_STR(obj != oopDesc::bs()->resolve_oop(obj)))
2191                 );
2192       obj = oopDesc::bs()->resolve_oop(obj);
2193       guarantee(! _sh->heap_region_containing(obj)->is_in_collection_set(), "forwarded oops must not point to dirty regions");
2194       guarantee(obj->is_oop(), "is_oop");
2195       ShenandoahHeap* sh = (ShenandoahHeap*) Universe::heap();
2196       if (! sh->is_marked_current(obj)) {
2197         sh->print_on(tty);
2198       }
2199       assert(sh->is_marked_current(obj), err_msg("Referenced Objects should be marked obj: "PTR_FORMAT", marked: %s, is_in_heap: %s",
2200                                                p2i((HeapWord*) obj), BOOL_TO_STR(sh->is_marked_current(obj)), BOOL_TO_STR(sh->is_in(obj))));
2201     }
2202   }
2203 
2204   void do_oop(oop* p)       { do_oop_nv(p); }
2205   void do_oop(narrowOop* p) { do_oop_nv(p); }
2206 
2207 };
2208 
// Verify liveness invariants over all roots and all marked objects.
void ShenandoahHeap::verify_live() {

  VerifyLivenessClosure cl;
  roots_iterate(&cl);

  IterateMarkedObjectsClosure marked_oops(&cl);
  object_iterate(&marked_oops);

}
2218 
// Post-evacuation verification for each reference:
//  - forwarded objects exist only in collection-set (from-space) regions,
//  - the forwardee is outside the collection set, a valid oop, and its
//    klass pointer goes to metaspace.
class VerifyAfterEvacuationClosure : public ExtendedOopClosure {

  ShenandoahHeap* _sh;

public:
  VerifyAfterEvacuationClosure() : _sh ( ShenandoahHeap::heap() ) {}

  template<class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      guarantee(_sh->heap_region_containing(obj)->is_in_collection_set() == (obj != oopDesc::bs()->resolve_oop(obj)),
                err_msg("forwarded objects can only exist in dirty (from-space) regions is_dirty: %s, is_forwarded: %s obj-klass: %s, marked: %s",
                        BOOL_TO_STR(_sh->heap_region_containing(obj)->is_in_collection_set()),
                        BOOL_TO_STR(obj != oopDesc::bs()->resolve_oop(obj)), obj->klass()->external_name(), BOOL_TO_STR(_sh->is_marked_current(obj)))
                );
      // From here on, verify the forwardee.
      obj = oopDesc::bs()->resolve_oop(obj);
      guarantee(! _sh->heap_region_containing(obj)->is_in_collection_set(), "forwarded oops must not point to dirty regions");
      guarantee(obj->is_oop(), "is_oop");
      guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }

};
2246 
// Post-update-refs verification for each reference: after reference
// updating, no live reference may point into the collection set or at a
// still-forwarded object.
class VerifyAfterUpdateRefsClosure : public ExtendedOopClosure {

  ShenandoahHeap* _sh;

public:
  VerifyAfterUpdateRefsClosure() : _sh ( ShenandoahHeap::heap() ) {}

  template<class T> void do_oop_nv(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      guarantee((! _sh->heap_region_containing(obj)->is_in_collection_set()),
                err_msg("no live reference must point to from-space, is_marked: %s",
                        BOOL_TO_STR(_sh->is_marked_current(obj))));
      // Extra diagnostics before the guarantee below fires.
      if (obj != oopDesc::bs()->resolve_oop(obj) && _sh->is_in(p)) {
        tty->print_cr("top-limit: "PTR_FORMAT", p: "PTR_FORMAT, p2i(_sh->heap_region_containing(p)->concurrent_iteration_safe_limit()), p2i(p));
      }
      guarantee(obj == oopDesc::bs()->resolve_oop(obj), "no live reference must point to forwarded object");
      guarantee(obj->is_oop(), "is_oop");
      guarantee(Metaspace::contains(obj->klass()), "klass pointer must go to metaspace");
    }
  }

  void do_oop(oop* p)       { do_oop_nv(p); }
  void do_oop(narrowOop* p) { do_oop_nv(p); }

};
2274 
// Verify heap invariants after evacuation: size bookkeeping, then roots and
// all currently-marked objects against the post-evacuation closure.
void ShenandoahHeap::verify_heap_after_evacuation() {

  verify_heap_size_consistency();

  // Retire TLABs so the heap is parseable for iteration.
  ensure_parsability(false);

  VerifyAfterEvacuationClosure cl;
  roots_iterate(&cl);

  IterateMarkedCurrentObjectsClosure marked_oops(&cl);
  object_iterate(&marked_oops);

}
2288 
// Region closure asserting that, after reference updating, no region is in
// the collection set (neither by region flag nor by the fast-test table).
class VerifyRegionsAfterUpdateRefsClosure : public ShenandoahHeapRegionClosure {
public:
  bool doHeapRegion(ShenandoahHeapRegion* r) {
    assert(! r->is_in_collection_set(), "no region must be in collection set");
    assert(! ShenandoahHeap::heap()->in_cset_fast_test(r->bottom()), "no region must be in collection set");
    return false; // never abort the region iteration
  }
};
2297 
// Assert the collection set is empty after reference updating.
void ShenandoahHeap::verify_regions_after_update_refs() {
  VerifyRegionsAfterUpdateRefsClosure verify_regions;
  heap_region_iterate(&verify_regions);
}
2302 
// Verify heap invariants after reference updating: size bookkeeping, then
// strong roots, weak roots, and all reachable oops (skipping dirty regions
// and unreachable objects).
void ShenandoahHeap::verify_heap_after_update_refs() {

  verify_heap_size_consistency();

  // Retire TLABs so the heap is parseable for iteration.
  ensure_parsability(false);

  VerifyAfterUpdateRefsClosure cl;

  roots_iterate(&cl);
  weak_roots_iterate(&cl);
  oop_iterate(&cl, true, true);

}
2316 
// Finish (or abort) the concurrent marking phase and lower the marking flag
// (which also deactivates the SATB queues).
void ShenandoahHeap::stop_concurrent_marking() {
  assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // Marking completed normally: any pending need-update-refs state can be
    // cleared. If marking WAS cancelled, the flag is left set so reference
    // updating is finished later (see cleanup_after_cancelconcgc()).
    set_need_update_refs(false);
  }
  set_concurrent_mark_in_progress(false);

  if (ShenandoahGCVerbose) {
    print_heap_regions();
  }

#ifdef ASSERT
  if (ShenandoahVerify && ! _cancelled_concgc) {
    verify_heap_after_marking();
  }

#endif
}
2337 
// Whether a concurrent marking phase is currently active.
bool ShenandoahHeap::concurrent_mark_in_progress() {
  return _concurrent_mark_in_progress;
}
2341 
// Toggle the concurrent-marking flag and (de)activate the SATB mark queues
// of all Java threads accordingly. Queues are expected empty on deactivation.
void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  if (ShenandoahTracePhases) {
    if (in_progress) {
      gclog_or_tty->print_cr("Shenandoah starting concurrent marking, heap used: "SIZE_FORMAT" MB", used() / M);
    } else {
      gclog_or_tty->print_cr("Shenandoah finishing concurrent marking, heap used: "SIZE_FORMAT" MB", used() / M);
    }
  }

  _concurrent_mark_in_progress = in_progress;
  JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, ! in_progress);
}
2354 
// Toggle the evacuation flag on the heap and on all Java threads.
// The trailing fence publishes the flag to threads reading it unlocked.
void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
  if (ShenandoahTracePhases) {
    if (ShenandoahConcurrentEvacuation) {
      if (in_progress) {
        gclog_or_tty->print_cr("Shenandoah starting concurrent evacuation, heap used: "SIZE_FORMAT" MB", used() / M);
      } else {
        gclog_or_tty->print_cr("Shenandoah finishing concurrent evacuation, heap used: "SIZE_FORMAT" MB", used() / M);
      }
    } else {
      if (in_progress) {
        gclog_or_tty->print_cr("Shenandoah starting non-concurrent evacuation");
      } else {
        gclog_or_tty->print_cr("Shenandoah finishing non-concurrent evacuation");
      }
    }
  }
  JavaThread::set_evacuation_in_progress_all_threads(in_progress);
  _evacuation_in_progress = in_progress;
  OrderAccess::fence();
}
2375 
// Whether an evacuation phase is currently active.
bool ShenandoahHeap::is_evacuation_in_progress() {
  return _evacuation_in_progress;
}
2379 
// Whether a reference-updating phase is currently active.
bool ShenandoahHeap::is_update_references_in_progress() {
  return _update_references_in_progress;
}
2383 
// Toggle the reference-updating flag, with optional phase tracing.
void ShenandoahHeap::set_update_references_in_progress(bool update_refs_in_progress) {
  if (ShenandoahTracePhases) {
    if (ShenandoahConcurrentUpdateRefs) {
      if (update_refs_in_progress) {
        gclog_or_tty->print_cr("Shenandoah starting concurrent reference-updating");
      } else {
        gclog_or_tty->print_cr("Shenandoah finishing concurrent reference-updating");
      }
    } else {
      if (update_refs_in_progress) {
        gclog_or_tty->print_cr("Shenandoah starting non-concurrent reference-updating");
      } else {
        gclog_or_tty->print_cr("Shenandoah finishing non-concurrent reference-updating");
      }
    }
  }
  _update_references_in_progress = update_refs_in_progress;
}
2402 
2403 void ShenandoahHeap::verify_copy(oop p,oop c){
2404     assert(p != oopDesc::bs()->resolve_oop(p), "forwarded correctly");
2405     assert(oopDesc::bs()->resolve_oop(p) == c, "verify pointer is correct");
2406     if (p->klass() != c->klass()) {
2407       print_heap_regions();
2408     }
2409     assert(p->klass() == c->klass(), err_msg("verify class p-size: "INT32_FORMAT" c-size: "INT32_FORMAT, p->size(), c->size()));
2410     assert(p->size() == c->size(), "verify size");
2411     // Object may have been locked between copy and verification
2412     //    assert(p->mark() == c->mark(), "verify mark");
2413     assert(c == oopDesc::bs()->resolve_oop(c), "verify only forwarded once");
2414   }
2415 
2416 void ShenandoahHeap::oom_during_evacuation() {
2417   // tty->print_cr("Out of memory during evacuation, cancel evacuation, schedule full GC");
2418   // We ran out of memory during evacuation. Cancel evacuation, and schedule a full-GC.
2419   collector_policy()->set_should_clear_all_soft_refs(true);
2420   concurrent_thread()->schedule_full_gc();
2421   cancel_concgc();
2422 
2423   if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
2424     tty->print_cr("OOM during evacuation. Let Java thread wait until evacuation settlded..");
2425     while (_evacuation_in_progress) { // wait.
2426       Thread::current()->_ParkEvent->park(1) ;
2427     }
2428   }
2429 
2430 }
2431 
// Copy object 'p' into the allocation at 's'. The first word(s) at 's' hold
// the brooks pointer; the object payload starts right after it.
void ShenandoahHeap::copy_object(oop p, HeapWord* s) {
  HeapWord* filler = s;
  assert(s != NULL, "allocation of brooks pointer must not fail");
  HeapWord* copy = s + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;

  guarantee(copy != NULL, "allocation of copy object must not fail");
  Copy::aligned_disjoint_words((HeapWord*) p, copy, p->size());
  // Set up the copy's brooks pointer to point at itself.
  initialize_brooks_ptr(filler, copy);

#ifdef ASSERT
  if (ShenandoahTraceEvacuations) {
    tty->print_cr("copy object from "PTR_FORMAT" to: "PTR_FORMAT, p2i((HeapWord*) p), p2i(copy));
  }
#endif
}
2447 
// Evacuate object 'p' to to-space on behalf of 'thread' and return the
// canonical copy. Allocation comes from the thread's GCLAB when possible,
// else from shared heap memory. The winner of the brooks-pointer CAS
// installs its copy; losers roll back their GCLAB allocation and use the
// winner's copy. On allocation failure the OOM protocol runs and the
// current forwardee is returned.
oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
  ShenandoahHeapRegion* hr;
  size_t required;

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    // From-space may be memory-protected; lift protection to read p->size().
    hr = heap_region_containing(p);
    {
      hr->memProtectionOff();
      required  = BrooksPointer::BROOKS_POINTER_OBJ_SIZE + p->size();
      hr->memProtectionOn();
    }
  } else {
    required  = BrooksPointer::BROOKS_POINTER_OBJ_SIZE + p->size();
  }
#else
    required  = BrooksPointer::BROOKS_POINTER_OBJ_SIZE + p->size();
#endif

  assert(! heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

  // Don't even attempt to evacuate anything if evacuation has been cancelled.
  if (_cancelled_concgc) {
    return ShenandoahBarrierSet::resolve_oop_static(p);
  }

  bool alloc_from_gclab = true;
  thread->set_evacuating(true);
  HeapWord* filler = allocate_from_gclab(thread, required);
  if (filler == NULL) {
    // GCLAB exhausted: fall back to shared allocation.
    filler = allocate_memory(required);
    alloc_from_gclab = false;
  }
  thread->set_evacuating(false);

  if (filler == NULL) {
    oom_during_evacuation();
    // If this is a Java thread, it should have waited
    // until all GC threads are done, and then we
    // return the forwardee.
    oop resolved = ShenandoahBarrierSet::resolve_oop_static(p);
    return resolved;
  }

  HeapWord* copy = filler + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;

#ifdef ASSERT
  if (ShenandoahVerifyReadsToFromSpace) {
    // Lift from-space protection while copying out of it.
    hr->memProtectionOff();
    copy_object(p, filler);
    hr->memProtectionOn();
  } else {
    copy_object(p, filler);
  }
#else
    copy_object(p, filler);
#endif

  // Race to install our copy as the forwardee; returns the previous value.
  HeapWord* result = BrooksPointer::get(p).cas_forwardee((HeapWord*) p, copy);

  oop return_val;
  if (result == (HeapWord*) p) {
    // We won the CAS: our copy is the canonical one.
    return_val = oop(copy);

    if (shenandoahPolicy()->update_refs_early()) {
      mark_current(return_val);
    }

#ifdef ASSERT
    if (ShenandoahTraceEvacuations) {
      tty->print("Copy of "PTR_FORMAT" to "PTR_FORMAT" succeeded \n", p2i((HeapWord*) p), p2i(copy));
    }
    assert(return_val->is_oop(), "expect oop");
    assert(p->klass() == return_val->klass(), err_msg("Should have the same class p: "PTR_FORMAT", copy: "PTR_FORMAT, p2i((HeapWord*) p), p2i((HeapWord*) copy)));
#endif
  }  else {
    // Lost the race: undo our allocation and use the winner's copy.
    if (alloc_from_gclab) {
      thread->gclab().rollback(required);
    }
#ifdef ASSERT
    if (ShenandoahTraceEvacuations) {
      tty->print_cr("Copy of "PTR_FORMAT" to "PTR_FORMAT" failed, use other: "PTR_FORMAT, p2i((HeapWord*) p), p2i(copy), p2i((HeapWord*) result));
    }
#endif
    return_val = (oopDesc*) result;
  }

  return return_val;
}
2537 
// After a TLAB allocation of 'obj', install the brooks pointer in the first
// word(s) and return the actual object start right after it.
HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
  HeapWord* result = obj + BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
  initialize_brooks_ptr(obj, result);
  return result;
}
2543 
// Every object carries a brooks pointer in front of it; report its size.
uint ShenandoahHeap::oop_extra_words() {
  return BrooksPointer::BROOKS_POINTER_OBJ_SIZE;
}
2547 
// Grow the heap by one region: commit backing storage, create and register
// the new region, and add it to the free set. Returns false when the
// maximum region count has been reached.
bool ShenandoahHeap::grow_heap_by() {
  int new_region_index = ensure_new_regions(1);
  if (new_region_index != -1) {
    ShenandoahHeapRegion* new_region = new ShenandoahHeapRegion();
    // The new region's bottom follows directly from its index.
    HeapWord* start = _first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * new_region_index;
    new_region->initialize_heap_region(start, ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize, new_region_index);
    if (ShenandoahGCVerbose) {
      tty->print_cr("allocating new region at index: "INT32_FORMAT, new_region_index);
      new_region->print();
    }
    _ordered_regions[new_region_index] = new_region;
    _free_regions->append(new_region);
    return true;
  } else {
    return false;
  }
}
2565 
2566 int ShenandoahHeap::ensure_new_regions(int new_regions) {
2567 
2568   size_t num_regions = _num_regions;
2569   size_t new_num_regions = num_regions + new_regions;
2570   if (new_num_regions >= _max_regions) {
2571     // Not enough regions left.
2572     return -1;
2573   }
2574 
2575   size_t expand_size = new_regions * ShenandoahHeapRegion::RegionSizeBytes;
2576   if (ShenandoahGCVerbose) {
2577     tty->print_cr("expanding storage by "SIZE_FORMAT_HEX" bytes, for "INT32_FORMAT" new regions", expand_size, new_regions);
2578   }
2579   bool success = _storage.expand_by(expand_size);
2580   assert(success, "should always be able to expand by requested size");
2581 
2582   _num_regions = new_num_regions;
2583 
2584   return num_regions;
2585 
2586 }
2587 
2588 bool  ShenandoahIsAliveClosure:: do_object_b(oop obj) {
2589 
2590   ShenandoahHeap* sh = ShenandoahHeap::heap();
2591   if (sh->need_update_refs()) {
2592     obj = ShenandoahBarrierSet::resolve_oop_static(obj);
2593   }
2594 
2595 #ifdef ASSERT
2596   if (obj != ShenandoahBarrierSet::resolve_oop_static(obj)) {
2597     ShenandoahHeap* sh = ShenandoahHeap::heap();
2598   }
2599 #endif
2600   assert(obj == ShenandoahBarrierSet::resolve_oop_static(obj), "needs to be in to-space");
2601 
2602     HeapWord* addr = (HeapWord*) obj;
2603 
2604     if (ShenandoahTraceWeakReferences) {
2605 
2606       if (addr != NULL) {
2607         if(sh->is_in(addr)) {
2608           if (sh->is_obj_ill(obj)) {
2609             HandleMark hm;
2610             tty->print_cr("ShenandoahIsAliveClosure Found an ill object "PTR_FORMAT, p2i((HeapWord*) obj));
2611             obj->print();
2612           }
2613           else
2614             tty->print_cr("found a healthy object "PTR_FORMAT, p2i((HeapWord*) obj));
2615 
2616         } else {
2617           tty->print_cr("found an object outside the heap "PTR_FORMAT, p2i((HeapWord*) obj));
2618         }
2619       } else {
2620         tty->print_cr("found a null object "PTR_FORMAT, p2i((HeapWord*) obj));
2621       }
2622     }
2623 
2624     return addr != NULL && sh->is_marked_current(obj); //(!sh->is_in(addr) || !sh->is_obj_ill(obj));
2625 }
2626 
2627 void ShenandoahHeap::ref_processing_init() {
2628   MemRegion mr = reserved_region();
2629 
2630   // Concurrent Mark ref processor
2631 //   _ref_processor =
2632 //     new ReferenceProcessor(mr,    // span
2633 //                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2634 //                                 // mt processing
2635 //                            (int) ParallelGCThreads,
2636 //                                 // degree of mt processing
2637 //                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
2638 //                                 // mt discovery
2639 //                            (int) MAX2(ParallelGCThreads, ConcGCThreads),
2640 //                                 // degree of mt discovery
2641 //                            false,
2642 //                                 // Reference discovery is not atomic
2643 //                         &isAlive);
2644 //                                 // is alive closure
2645 //                                 // (for efficiency/performance)
2646   _ref_processor =
2647     new ReferenceProcessor(mr,    // span
2648                            ParallelRefProcEnabled && (ConcGCThreads > 1),
2649                            // mt processing
2650                            (int) ConcGCThreads,
2651                            // degree of mt processing
2652                            (ConcGCThreads > 1),
2653                            // mt discovery
2654                            (int) ConcGCThreads,
2655                            // degree of mt discovery
2656                            false,
2657                            // Reference discovery is not atomic
2658                            &isAlive);
2659   // is alive closure
2660   // (for efficiency/performance)
2661 
2662 
2663 
2664 }
2665 
#ifdef ASSERT
// Debug-only: toggle memory protection on every region in the collection
// set, so stray from-space accesses trap immediately.
void ShenandoahHeap::set_from_region_protection(bool protect) {
  for (uint idx = 0; idx < _num_regions; idx++) {
    ShenandoahHeapRegion* r = _ordered_regions[idx];
    if (r == NULL) continue;
    if (!r->is_in_collection_set()) continue;
    if (protect) {
      r->memProtectionOn();
    } else {
      r->memProtectionOff();
    }
  }
}
#endif
2680 
// Acquire the java.lang.ref pending-list lock, delegated to the surrogate
// locker thread (which manipulates the Java-level lock on our behalf).
void ShenandoahHeap::acquire_pending_refs_lock() {
  _concurrent_gc_thread->slt()->manipulatePLL(SurrogateLockerThread::acquirePLL);
}
2684 
// Release the pending-list lock and notify waiters, again via the
// surrogate locker thread.
void ShenandoahHeap::release_pending_refs_lock() {
  _concurrent_gc_thread->slt()->manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
}
2688 
// Accessor: array of all heap regions, indexed by region number.
ShenandoahHeapRegion** ShenandoahHeap::heap_regions() {
  return _ordered_regions;
}
2692 
// Accessor: current number of regions in the heap.
size_t ShenandoahHeap::num_regions() {
  return _num_regions;
}
2696 
// Accessor: maximum number of regions the heap may grow to.
size_t ShenandoahHeap::max_regions() {
  return _max_regions;
}
2700 
// GC tracer, owned by the collector policy.
GCTracer* ShenandoahHeap::tracer() {
  return collector_policy()->tracer();
}
2704 
size_t ShenandoahHeap::tlab_used(Thread* thread) const {
  // NOTE(review): the per-thread argument is ignored; this reports the
  // used size of the free-region set regardless of which thread asks.
  return _free_regions->used();
}
2708 
// Request cancellation of the concurrent GC cycle in progress (if any).
void ShenandoahHeap::cancel_concgc() {
  // only report it once
  if (!_cancelled_concgc) {
    if (ShenandoahTracePhases) {
      tty->print_cr("Cancelling GC");
    }
    _cancelled_concgc = true;
    // Publish the flag before reporting, so anyone who observes the
    // policy callback also observes the cancellation.
    OrderAccess::fence();
    _shenandoah_policy->report_concgc_cancelled();
  }

  // Non-GC (mutator) threads block here until evacuation winds down;
  // GC threads skip the wait — presumably so they cannot block on work
  // they themselves must finish. TODO confirm intent.
  if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
    while (_evacuation_in_progress) { // wait.
      Thread::current()->_ParkEvent->park(1) ;
    }
  }
}
2726 
2727 bool ShenandoahHeap::cancelled_concgc() {
2728   bool cancelled = _cancelled_concgc;
2729   return cancelled;
2730 }
2731 
// Reset the cancellation flag.
void ShenandoahHeap::clear_cancelled_concgc() {
  _cancelled_concgc = false;
}
2735 
// Maximum number of GC worker threads overall.
int ShenandoahHeap::max_workers() {
  return _max_workers;
}
2739 
// Maximum number of workers for parallel (stop-the-world) phases.
int ShenandoahHeap::max_parallel_workers() {
  return _max_parallel_workers;
}
// Maximum number of workers for concurrent phases.
int ShenandoahHeap::max_conc_workers() {
  return _max_conc_workers;
}
2746 
// Terminate concurrent GC activity, e.g. on VM exit. The statement order
// below is deliberate — do not reorder.
void ShenandoahHeap::shutdown() {
  // We set this early here, to let GC threads terminate before we ask the concurrent thread
  // to terminate, which would otherwise block until all GC threads come to finish normally.
  _cancelled_concgc = true;
  _concurrent_gc_thread->shutdown();
  cancel_concgc();
}
2754 
// Parallel task that unlinks dead entries from the interned-string table
// and the symbol table. Workers claim table buckets cooperatively through
// the tables' parallel-claim indices; per-worker counts are folded into
// the task-wide totals atomically.
class ShenandoahStringSymbolTableUnlinkTask : public AbstractGangTask {
private:
  BoolObjectClosure* _is_alive;       // liveness predicate for string entries
  int _initial_string_table_size;     // table sizes at construction, used to
  int _initial_symbol_table_size;     // verify full coverage in the destructor

  bool  _process_strings;
  int _strings_processed;             // totals accumulated via Atomic::add
  int _strings_removed;

  bool  _process_symbols;
  int _symbols_processed;
  int _symbols_removed;

public:
  ShenandoahStringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
    AbstractGangTask("String/Symbol Unlinking"),
    _is_alive(is_alive),
    _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
    _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {

    _initial_string_table_size = StringTable::the_table()->table_size();
    _initial_symbol_table_size = SymbolTable::the_table()->table_size();
    // Reset the claim cursors so workers start scanning from bucket 0.
    if (process_strings) {
      StringTable::clear_parallel_claimed_index();
    }
    if (process_symbols) {
      SymbolTable::clear_parallel_claimed_index();
    }
  }

  ~ShenandoahStringSymbolTableUnlinkTask() {
    // The claim cursor must have advanced past the whole table; otherwise
    // some buckets were never scanned by any worker.
    guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
              err_msg("claim value %d after unlink less than initial string table size %d",
                      StringTable::parallel_claimed_index(), _initial_string_table_size));
    guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
              err_msg("claim value %d after unlink less than initial symbol table size %d",
                      SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));

    if (G1TraceStringSymbolTableScrubbing) {
      gclog_or_tty->print_cr("Cleaned string and symbol table, "
                             "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
                             "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
                             strings_processed(), strings_removed(),
                             symbols_processed(), symbols_removed());
    }
  }

  // Per-worker entry point: unlink this worker's share of each table and
  // add its counts to the totals.
  void work(uint worker_id) {
    int strings_processed = 0;
    int strings_removed = 0;
    int symbols_processed = 0;
    int symbols_removed = 0;
    if (_process_strings) {
      StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
      Atomic::add(strings_processed, &_strings_processed);
      Atomic::add(strings_removed, &_strings_removed);
    }
    if (_process_symbols) {
      SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
      Atomic::add(symbols_processed, &_symbols_processed);
      Atomic::add(symbols_removed, &_symbols_removed);
    }
  }

  size_t strings_processed() const { return (size_t)_strings_processed; }
  size_t strings_removed()   const { return (size_t)_strings_removed; }

  size_t symbols_processed() const { return (size_t)_symbols_processed; }
  size_t symbols_removed()   const { return (size_t)_symbols_removed; }
};
2826 
2827 void ShenandoahHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) {
2828 
2829   workers()->set_active_workers(_max_parallel_workers);
2830   ShenandoahStringSymbolTableUnlinkTask shenandoah_unlink_task(is_alive, process_strings, process_symbols);
2831   workers()->run_task(&shenandoah_unlink_task);
2832 
2833   //  if (G1StringDedup::is_enabled()) {
2834   //    G1StringDedup::unlink(is_alive);
2835   //  }
2836 }
2837 
// An object is "ill" (dead) iff it is not marked in the current cycle.
bool ShenandoahHeap::is_obj_ill(const oop obj) const {
  return ! is_marked_current(obj);
}
2841 
// Setter for _need_update_refs: while true, oops read from the heap must
// be resolved through their brooks pointers before use (cf. do_object_b).
void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
  _need_update_refs = need_update_refs;
}
2845 
// Accessor for the JNI critical-region bookkeeping object.
ShenandoahJNICritical* ShenandoahHeap::jni_critical() {
  return _jni_critical;
}
2849 
2850 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2851   int region_idx = r->region_number() + 1;
2852   ShenandoahHeapRegion* next = _ordered_regions[region_idx];
2853   guarantee(next->region_number() == region_idx, "region number must match");
2854   while (next->is_humongous()) {
2855     region_idx = next->region_number() + 1;
2856     next = _ordered_regions[region_idx];
2857     guarantee(next->region_number() == region_idx, "region number must match");
2858   }
2859   return next;
2860 }