1 /*
   2  * Copyright (c) 2013, 2017, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 
  27 #include "gc/shared/gcTimer.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/parallelCleaning.hpp"
  30 
  31 #include "gc/shenandoah/brooksPointer.hpp"
  32 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
  33 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  36 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  37 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  38 #include "gc/shenandoah/shenandoahConcurrentThread.hpp"
  39 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  40 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  41 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  42 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  44 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  45 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  46 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  47 #include "gc/shenandoah/shenandoahPartialGC.hpp"
  48 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  49 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  50 #include "gc/shenandoah/shenandoahUtils.hpp"
  51 #include "gc/shenandoah/shenandoahVerifier.hpp"
  52 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  53 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
  54 
  55 #include "runtime/vmThread.hpp"
  56 #include "services/mallocTracker.hpp"
  57 
  58 ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}
  59 
  60 #ifdef ASSERT
  61 template <class T>
  62 void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
  63   T o = oopDesc::load_heap_oop(p);
  64   if (! oopDesc::is_null(o)) {
  65     oop obj = oopDesc::decode_heap_oop_not_null(o);
  66     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
  67            "need to-space object here obj: " PTR_FORMAT ", rb(obj): " PTR_FORMAT ", p: " PTR_FORMAT,
  68            p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
  69   }
  70 }
  71 
  72 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
  73 void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
  74 #endif
  75 
  76 const char* ShenandoahHeap::name() const {
  77   return "Shenandoah";
  78 }
  79 
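     // Pre-touch task: touches committed heap regions and the matching slices of both
     // mark bitmaps in parallel, so pages are faulted in by GC worker threads rather
     // than by a single initializing thread (which matters for NUMA placement).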
  80 class ShenandoahPretouchTask : public AbstractGangTask {
  81 private:
  82   ShenandoahHeapRegionSet* _regions;
  83   const size_t _bitmap_size;
  84   const size_t _page_size;
  85   char* _bitmap0_base;
  86   char* _bitmap1_base;
  87 public:
  88   ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
  89                          char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
  90                          size_t page_size) :
  91     AbstractGangTask("Shenandoah PreTouch",
  92                      Universe::is_fully_initialized() ? GCId::current_raw() :
  93                                                         // During VM initialization there is
  94                                                         // no GC cycle that this task can be
  95                                                         // associated with.
  96                                                         GCId::undefined()),
  97     _regions(regions),
  98     _bitmap_size(bitmap_size),
  99     _page_size(page_size),
 100     _bitmap0_base(bitmap0_base),
 101     _bitmap1_base(bitmap1_base) {
 102     _regions->clear_current_index();
 103   }
 104 
 105   virtual void work(uint worker_id) {
 106     ShenandoahHeapRegion* r = _regions->claim_next();
 107     while (r != NULL) {
 108       log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 109                           r->region_number(), p2i(r->bottom()), p2i(r->end()));
 110       os::pretouch_memory(r->bottom(), r->end(), _page_size);
 111 
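           // Compute the byte range of the mark bitmaps that covers this region:
           // the bitmap needs one byte per MarkBitMap::heap_map_factor() heap bytes,
           // so the region index translates directly into a byte offset in the bitmap.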
 112       size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 113       size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 114       assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
 115 
 116       log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 117                           r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
 118       os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);
 119 
 120       log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 121                           r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
 122       os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);
 123 
 124       r = _regions->claim_next();
 125     }
 126   }
 127 };
 128 
 129 jint ShenandoahHeap::initialize() {
 130   CollectedHeap::pre_initialize();
 131 
 132   BrooksPointer::initial_checks();
 133 
 134   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 135   size_t max_byte_size = collector_policy()->max_heap_byte_size();
 136   size_t heap_alignment = collector_policy()->heap_alignment();
 137 
 138   if (ShenandoahAlwaysPreTouch) {
 139     // With pre-touch enabled, the entire heap is committed right away.
 140     init_byte_size = max_byte_size;
 141   }
 142 
 143   Universe::check_alignment(max_byte_size,
 144                             ShenandoahHeapRegion::region_size_bytes(),
 145                             "shenandoah heap");
 146   Universe::check_alignment(init_byte_size,
 147                             ShenandoahHeapRegion::region_size_bytes(),
 148                             "shenandoah heap");
 149 
 150   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
 151                                                  heap_alignment);
 152   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
 153 
 154   set_barrier_set(new ShenandoahBarrierSet(this));
 155   ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
 156 
 157   _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
 158   size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
 159   _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
 160   _committed = _initial_size;
 161 
 162   log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
 163   if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
 164     vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
 165   }
 166 
 167   size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
 168   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 169 
 170   _ordered_regions = new ShenandoahHeapRegionSet(_num_regions);
 171   _free_regions = new ShenandoahFreeSet(_ordered_regions, _num_regions);
 172 
 173   _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());
 174 
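       // Top-at-mark-start (TAMS) arrays are indexed by region number. The pointers below
       // are biased by (heap base >> region shift), so a lookup can use
       // (addr >> region_size_bytes_shift()) directly as the index.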
 175   _next_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
 176   _next_top_at_mark_starts = _next_top_at_mark_starts_base -
 177                ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
 178 
 179   _complete_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
 180   _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
 181                ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
 182 
 183   {
 184     ShenandoahHeapLocker locker(lock());
 185     for (size_t i = 0; i < _num_regions; i++) {
 186       ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
 187                                                          (HeapWord*) pgc_rs.base() + reg_size_words * i,
 188                                                          reg_size_words,
 189                                                          i,
 190                                                          i < num_committed_regions);
 191 
 192       _complete_top_at_mark_starts_base[i] = r->bottom();
 193       _next_top_at_mark_starts_base[i] = r->bottom();
 194 
 195       // Add to ordered regions first.
 196       // The active size of the ordered region set serves as the number of active regions in the heap;
 197       // the free set and collection set use that number to assert the correctness of incoming regions.
 198       _ordered_regions->add_region(r);
 199       _free_regions->add_region(r);
 200       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 201     }
 202   }
 203 
 204   assert(_ordered_regions->active_regions() == _num_regions, "Must match");
 205   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 206          "misaligned heap: " PTR_FORMAT, p2i(base()));
 207 
 208   LogTarget(Trace, gc, region) lt;
 209   if (lt.is_enabled()) {
 210     ResourceMark rm;
 211     LogStream ls(lt);
 212     log_trace(gc, region)("All Regions");
 213     _ordered_regions->print_on(&ls);
 214     log_trace(gc, region)("Free Regions");
 215     _free_regions->print_on(&ls);
 216   }
 217 
 218   // The call below uses machinery (the SATB_Q_* monitors and locks) that lives in G1,
 219   // but probably belongs in a shared location.
 220   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
 221                                                SATB_Q_FL_lock,
 222                                                20 /*G1SATBProcessCompletedThreshold */,
 223                                                Shared_SATB_Q_lock);
 224 
 225   // Reserve space for the complete and next mark bitmaps.
 226   _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
 227   _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
 228 
 229   size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
 230 
 231   guarantee(bitmap_bytes_per_region != 0,
 232             "Bitmap bytes per region should not be zero");
 233   guarantee(is_power_of_2(bitmap_bytes_per_region),
 234             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 235 
 236   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 237 
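       // The mark bitmaps are committed and uncommitted in slices. A slice is at least one
       // page and covers a whole number of regions, so per-region (un)commit requests
       // remain page-granular.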
 238   if (bitmap_page_size > bitmap_bytes_per_region) {
 239     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 240     _bitmap_bytes_per_slice = bitmap_page_size;
 241   } else {
 242     _bitmap_regions_per_slice = 1;
 243     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 244   }
 245 
 246   guarantee(_bitmap_regions_per_slice >= 1,
 247             "Should have at least one region per slice: " SIZE_FORMAT,
 248             _bitmap_regions_per_slice);
 249 
 250   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 251             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 252             _bitmap_bytes_per_slice, bitmap_page_size);
 253 
 254   ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
 255   MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
 256   _bitmap0_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);
 257 
 258   ReservedSpace bitmap1(_bitmap_size, bitmap_page_size);
 259   MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
 260   _bitmap1_region = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);
 261 
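       // Commit only the bitmap slices that cover the initially committed regions,
       // rounded up to whole slices and capped at the total bitmap size.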
 262   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 263                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 264   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 265   os::commit_memory_or_exit((char *) (_bitmap0_region.start()), bitmap_init_commit, false,
 266                             "couldn't allocate initial bitmap");
 267   os::commit_memory_or_exit((char *) (_bitmap1_region.start()), bitmap_init_commit, false,
 268                             "couldn't allocate initial bitmap");
 269 
 270   size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 271 
 272   if (ShenandoahVerify) {
 273     ReservedSpace verify_bitmap(_bitmap_size, page_size);
 274     os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
 275                               "couldn't allocate verification bitmap");
 276     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 277     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 278     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 279     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 280   }
 281 
 282   if (ShenandoahAlwaysPreTouch) {
 283     assert (!AlwaysPreTouch, "Should have been overridden");
 284 
 285     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 286     // before initialize() below zeroes it from the initializing thread. For any given region,
 287     // we touch the region and the corresponding bitmaps from the same thread.
 288 
 289     log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
 290                        _ordered_regions->count(), page_size);
 291     ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
 292     _workers->run_task(&cl);
 293   }
 294 
 295   _mark_bit_map0.initialize(_heap_region, _bitmap0_region);
 296   _complete_mark_bit_map = &_mark_bit_map0;
 297 
 298   _mark_bit_map1.initialize(_heap_region, _bitmap1_region);
 299   _next_mark_bit_map = &_mark_bit_map1;
 300 
 301   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 302   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 303   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 304   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 305   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 306 
 307   if (UseShenandoahMatrix) {
 308     _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
 309   } else {
 310     _connection_matrix = NULL;
 311   }
 312 
 313   _partial_gc = _shenandoah_policy->can_do_partial_gc() ?
 314                 new ShenandoahPartialGC(this, _num_regions) :
 315                 NULL;
 316 
 317   _monitoring_support = new ShenandoahMonitoringSupport(this);
 318 
 319   _phase_timings = new ShenandoahPhaseTimings();
 320 
 321   if (ShenandoahAllocationTrace) {
 322     _alloc_tracker = new ShenandoahAllocTracker();
 323   }
 324 
 325   ShenandoahStringDedup::initialize();
 326 
 327   _concurrent_gc_thread = new ShenandoahConcurrentThread();
 328 
 329   ShenandoahMarkCompact::initialize();
 330 
 331   ShenandoahCodeRoots::initialize();
 332 
 333   return JNI_OK;
 334 }
 335 
 336 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 337   CollectedHeap(),
 338   _shenandoah_policy(policy),
 339   _concurrent_mark_in_progress(0),
 340   _evacuation_in_progress(0),
 341   _full_gc_in_progress(false),
 342   _update_refs_in_progress(false),
 343   _concurrent_partial_in_progress(false),
 344   _free_regions(NULL),
 345   _collection_set(NULL),
 346   _bytes_allocated_since_cm(0),
 347   _bytes_allocated_during_cm(0),
 348   _allocated_last_gc(0),
 349   _used_start_gc(0),
 350   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 351   _ref_processor(NULL),
 352   _next_top_at_mark_starts(NULL),
 353   _next_top_at_mark_starts_base(NULL),
 354   _complete_top_at_mark_starts(NULL),
 355   _complete_top_at_mark_starts_base(NULL),
 356   _mark_bit_map0(),
 357   _mark_bit_map1(),
 358   _aux_bit_map(),
 359   _connection_matrix(NULL),
 360   _cancelled_concgc(0),
 361   _need_update_refs(false),
 362   _need_reset_bitmaps(false),
 363   _verifier(NULL),
 364   _heap_lock(0),
 365   _used_at_last_gc(0),
 366   _alloc_seq_at_last_gc_start(0),
 367   _alloc_seq_at_last_gc_end(0),
 368   _safepoint_workers(NULL),
 369 #ifdef ASSERT
 370   _heap_lock_owner(NULL),
 371   _heap_expansion_count(0),
 372 #endif
 373   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 374   _phase_timings(NULL),
 375   _alloc_tracker(NULL)
 376 {
 377   log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
 378   log_info(gc, init)("Concurrent GC threads: " UINT32_FORMAT, ConcGCThreads);
 379   log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
 380 
 381   _scm = new ShenandoahConcurrentMark();
 382   _used = 0;
 383 
 384   _max_workers = MAX2(_max_workers, 1U);
 385   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 386                             /* are_GC_task_threads */true,
 387                             /* are_ConcurrentGC_threads */false);
 388   if (_workers == NULL) {
 389     vm_exit_during_initialization("Failed necessary allocation.");
 390   } else {
 391     _workers->initialize_workers();
 392   }
 393 
 394   if (ParallelSafepointCleanupThreads > 1) {
 395     _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
 396                                                 ParallelSafepointCleanupThreads,
 397                                                 false, false);
 398     _safepoint_workers->initialize_workers();
 399   }
 400 }
 401 
 402 class ShenandoahResetNextBitmapTask : public AbstractGangTask {
 403 private:
 404   ShenandoahHeapRegionSet* _regions;
 405 
 406 public:
 407   ShenandoahResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
 408     AbstractGangTask("Parallel Reset Bitmap Task"),
 409     _regions(regions) {
 410     _regions->clear_current_index();
 411   }
 412 
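       // Only [bottom, next TAMS) can contain marks: objects allocated above the next
       // top-at-mark-start are never marked in the next bitmap, so clearing up to TAMS
       // suffices. The assert below checks that the whole region is then clear.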
 413   void work(uint worker_id) {
 414     ShenandoahHeapRegion* region = _regions->claim_next();
 415     ShenandoahHeap* heap = ShenandoahHeap::heap();
 416     while (region != NULL) {
 417       if (heap->is_bitmap_slice_committed(region)) {
 418         HeapWord* bottom = region->bottom();
 419         HeapWord* top = heap->next_top_at_mark_start(region->bottom());
 420         if (top > bottom) {
 421           heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
 422         }
 423         assert(heap->is_next_bitmap_clear_range(bottom, region->end()), "must be clear");
 424       }
 425       region = _regions->claim_next();
 426     }
 427   }
 428 };
 429 
 430 void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
 431   assert_gc_workers(workers->active_workers());
 432 
 433   ShenandoahResetNextBitmapTask task = ShenandoahResetNextBitmapTask(_ordered_regions);
 434   workers->run_task(&task);
 435 }
 436 
 437 class ShenandoahResetCompleteBitmapTask : public AbstractGangTask {
 438 private:
 439   ShenandoahHeapRegionSet* _regions;
 440 
 441 public:
 442   ShenandoahResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
 443     AbstractGangTask("Parallel Reset Bitmap Task"),
 444     _regions(regions) {
 445     _regions->clear_current_index();
 446   }
 447 
 448   void work(uint worker_id) {
 449     ShenandoahHeapRegion* region = _regions->claim_next();
 450     ShenandoahHeap* heap = ShenandoahHeap::heap();
 451     while (region != NULL) {
 452       if (heap->is_bitmap_slice_committed(region)) {
 453         HeapWord* bottom = region->bottom();
 454         HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
 455         if (top > bottom) {
 456           heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
 457         }
 458         assert(heap->is_complete_bitmap_clear_range(bottom, region->end()), "must be clear");
 459       }
 460       region = _regions->claim_next();
 461     }
 462   }
 463 };
 464 
 465 void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
 466   assert_gc_workers(workers->active_workers());
 467 
 468   ShenandoahResetCompleteBitmapTask task = ShenandoahResetCompleteBitmapTask(_ordered_regions);
 469   workers->run_task(&task);
 470 }
 471 
 472 bool ShenandoahHeap::is_next_bitmap_clear() {
 473   for (size_t idx = 0; idx < _num_regions; idx++) {
 474     ShenandoahHeapRegion* r = _ordered_regions->get(idx);
 475     if (is_bitmap_slice_committed(r) && !is_next_bitmap_clear_range(r->bottom(), r->end())) {
 476       return false;
 477     }
 478   }
 479   return true;
 480 }
 481 
 482 bool ShenandoahHeap::is_next_bitmap_clear_range(HeapWord* start, HeapWord* end) {
 483   return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
 484 }
 485 
 486 bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
 487   return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
 488 }
 489 
 490 void ShenandoahHeap::print_on(outputStream* st) const {
 491   st->print_cr("Shenandoah Heap");
 492   st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
 493                capacity() / K, committed() / K, used() / K);
 494   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
 495                num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
 496 
 497   st->print("Status: ");
 498   if (concurrent_mark_in_progress()) {
 499     st->print("marking ");
 500   } else if (is_evacuation_in_progress()) {
 501     st->print("evacuating ");
 502   } else if (is_update_refs_in_progress()) {
 503     st->print("updating refs ");
 504   } else {
 505     st->print("idle ");
 506   }
 507   if (cancelled_concgc()) {
 508     st->print("cancelled ");
 509   }
 510   st->cr();
 511 
 512   st->print_cr("Reserved region:");
 513   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 514                p2i(reserved_region().start()),
 515                p2i(reserved_region().end()));
 516 
 517   if (UseShenandoahMatrix) {
 518     st->print_cr("Matrix:");
 519 
 520     ShenandoahConnectionMatrix* matrix = connection_matrix();
 521     if (matrix != NULL) {
 522       st->print_cr(" - base: " PTR_FORMAT, p2i(matrix->matrix_addr()));
 523       st->print_cr(" - stride: " SIZE_FORMAT, matrix->stride());
 524       st->print_cr(" - magic: " PTR_FORMAT, matrix->magic_offset());
 525     } else {
 526       st->print_cr(" No matrix.");
 527     }
 528   }
 529 
 530   if (Verbose) {
 531     print_heap_regions_on(st);
 532   }
 533 }
 534 
 535 class ShenandoahInitGCLABClosure : public ThreadClosure {
 536 public:
 537   void do_thread(Thread* thread) {
 538     thread->gclab().initialize(true);
 539   }
 540 };
 541 
 542 void ShenandoahHeap::post_initialize() {
 543   if (UseTLAB) {
 544     MutexLocker ml(Threads_lock);
 545 
 546     ShenandoahInitGCLABClosure init_gclabs;
 547     Threads::java_threads_do(&init_gclabs);
 548     gc_threads_do(&init_gclabs);
 549 
 550     // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
 551     // Instead, we let the WorkGang initialize the gclab when a new worker is created.
 552     _workers->set_initialize_gclab();
 553   }
 554 
 555   _scm->initialize(_max_workers);
 556 
 557   ref_processing_init();
 558 
 559   _shenandoah_policy->post_heap_initialize();
 560 }
 561 
 562 size_t ShenandoahHeap::used() const {
 563   OrderAccess::acquire();
 564   return _used;
 565 }
 566 
 567 size_t ShenandoahHeap::committed() const {
 568   OrderAccess::acquire();
 569   return _committed;
 570 }
 571 
 572 void ShenandoahHeap::increase_committed(size_t bytes) {
 573   assert_heaplock_or_safepoint();
 574   _committed += bytes;
 575 }
 576 
 577 void ShenandoahHeap::decrease_committed(size_t bytes) {
 578   assert_heaplock_or_safepoint();
 579   _committed -= bytes;
 580 }
 581 
 582 void ShenandoahHeap::increase_used(size_t bytes) {
 583   assert_heaplock_or_safepoint();
 584   _used += bytes;
 585 }
 586 
 587 void ShenandoahHeap::set_used(size_t bytes) {
 588   assert_heaplock_or_safepoint();
 589   _used = bytes;
 590 }
 591 
 592 void ShenandoahHeap::decrease_used(size_t bytes) {
 593   assert_heaplock_or_safepoint();
 594   assert(_used >= bytes, "never decrease heap size by more than we've left");
 595   _used -= bytes;
 596 }
 597 
 598 size_t ShenandoahHeap::capacity() const {
 599   return num_regions() * ShenandoahHeapRegion::region_size_bytes();
 600 }
 601 
 602 bool ShenandoahHeap::is_maximal_no_gc() const {
 603   Unimplemented();
 604   return true;
 605 }
 606 
 607 size_t ShenandoahHeap::max_capacity() const {
 608   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 609 }
 610 
 611 size_t ShenandoahHeap::initial_capacity() const {
 612   return _initial_size;
 613 }
 614 
 615 bool ShenandoahHeap::is_in(const void* p) const {
 616   HeapWord* heap_base = (HeapWord*) base();
 617   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 618   return p >= heap_base && p < last_region_end;
 619 }
 620 
 621 bool ShenandoahHeap::is_scavengable(const void* p) {
 622   return true;
 623 }
 624 
 625 void ShenandoahHeap::handle_heap_shrinkage() {
 626   ShenandoahHeapLocker locker(lock());
 627 
 628   ShenandoahHeapRegionSet* set = regions();
 629 
 630   size_t count = 0;
 631   double current = os::elapsedTime();
 632   for (size_t i = 0; i < num_regions(); i++) {
 633     ShenandoahHeapRegion* r = set->get(i);
 634     if (r->is_empty_committed() &&
 635             (current - r->empty_time()) * 1000 > ShenandoahUncommitDelay &&
 636             r->make_empty_uncommitted()) {
 637       count++;
 638     }
 639   }
 640 
 641   if (count > 0) {
 642     log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
 643                  count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
 644   }
 645 }
 646 
 647 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 648   // Retain the gclab and allocate the object in shared space if
 649   // the amount free in the gclab is too large to discard.
 650   if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
 651     thread->gclab().record_slow_allocation(size);
 652     return NULL;
 653   }
 654 
 655   // Discard gclab and allocate a new one.
 656   // To minimize fragmentation, the last GCLAB may be smaller than the rest.
 657   size_t new_gclab_size = thread->gclab().compute_size(size);
 658 
 659   thread->gclab().clear_before_allocation();
 660 
 661   if (new_gclab_size == 0) {
 662     return NULL;
 663   }
 664 
 665   // Allocate a new GCLAB...
 666   HeapWord* obj = allocate_new_gclab(new_gclab_size);
 667   if (obj == NULL) {
 668     return NULL;
 669   }
 670 
 671   if (ZeroTLAB) {
 672     // ..and clear it.
 673     Copy::zero_to_words(obj, new_gclab_size);
 674   } else {
 675     // ...and zap just allocated object.
 676 #ifdef ASSERT
 677     // Skip mangling the space corresponding to the object header to
 678     // ensure that the returned space is not considered parsable by
 679     // any concurrent GC thread.
 680     size_t hdr_size = oopDesc::header_size();
 681     Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
 682 #endif // ASSERT
 683   }
 684   thread->gclab().fill(obj, obj + size, new_gclab_size);
 685   return obj;
 686 }
 687 
 688 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
 689 #ifdef ASSERT
 690   log_debug(gc, alloc)("Allocate new tlab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
 691 #endif
 692   return allocate_new_lab(word_size, _alloc_tlab);
 693 }
 694 
 695 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
 696 #ifdef ASSERT
 697   log_debug(gc, alloc)("Allocate new gclab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
 698 #endif
 699   return allocate_new_lab(word_size, _alloc_gclab);
 700 }
 701 
 702 HeapWord* ShenandoahHeap::allocate_new_lab(size_t word_size, AllocType type) {
 703   HeapWord* result = allocate_memory(word_size, type);
 704 
 705   if (result != NULL) {
 706     assert(! in_collection_set(result), "Never allocate in collection set");
 707     _bytes_allocated_since_cm += word_size * HeapWordSize;
 708 
 709     log_develop_trace(gc, tlab)("allocating new tlab of size " SIZE_FORMAT " at addr " PTR_FORMAT, word_size, p2i(result));
 710 
 711   }
 712   return result;
 713 }
 714 
 715 ShenandoahHeap* ShenandoahHeap::heap() {
 716   CollectedHeap* heap = Universe::heap();
 717   assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
 718   assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
 719   return (ShenandoahHeap*) heap;
 720 }
 721 
 722 ShenandoahHeap* ShenandoahHeap::heap_no_check() {
 723   CollectedHeap* heap = Universe::heap();
 724   return (ShenandoahHeap*) heap;
 725 }
 726 
 727 HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, AllocType type) {
 728   ShenandoahAllocTrace trace_alloc(word_size, type);
 729 
 730   bool in_new_region = false;
 731   HeapWord* result = allocate_memory_under_lock(word_size, type, in_new_region);
 732 
 733   if (type == _alloc_tlab || type == _alloc_shared) {
 734     // If allocation failed, try a full GC, then retry the allocation.
 735     //
 736     // It might happen that one of the threads requesting allocation would unblock
 737     // way later after full-GC happened, only to fail the second allocation, because
 738     // other threads have already depleted the free storage. In this case, a better
 739     // strategy would be to try full-GC again.
 740     //
 741     // Lacking the way to detect progress from "collect" call, we are left with blindly
 742     // retrying for some bounded number of times.
 743     // TODO: Poll if Full GC made enough progress to warrant retry.
 744     int tries = 0;
 745     while ((result == NULL) && (tries++ < ShenandoahFullGCTries)) {
 746       log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing full GC, try %d",
 747                     p2i(Thread::current()), word_size * HeapWordSize, tries);
 748       collect(GCCause::_allocation_failure);
 749       result = allocate_memory_under_lock(word_size, type, in_new_region);
 750     }
 751   }
 752 
 753   if (in_new_region) {
 754     // Update monitoring counters when we took a new region. This amortizes the
 755     // update costs on slow path.
 756     concurrent_thread()->trigger_counters_update();
 757   }
 758 
 759   log_develop_trace(gc, alloc)("allocate memory chunk of size " SIZE_FORMAT " at addr " PTR_FORMAT " by thread %d ",
 760                                word_size, p2i(result), Thread::current()->osthread()->thread_id());
 761 
 762   return result;
 763 }
 764 
 765 HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size, AllocType type, bool& in_new_region) {
 766   ShenandoahHeapLocker locker(lock());
 767   return _free_regions->allocate(word_size, type, in_new_region);
 768 }
 769 
 770 HeapWord*  ShenandoahHeap::mem_allocate(size_t size,
 771                                         bool*  gc_overhead_limit_was_exceeded) {
 772   HeapWord* filler = allocate_memory(size + BrooksPointer::word_size(), _alloc_shared);
 773   if (filler != NULL) {
 774     HeapWord* result = filler + BrooksPointer::word_size();
 775     BrooksPointer::initialize(oop(result));
 776     _bytes_allocated_since_cm += size * HeapWordSize;
 777 
 778     assert(! in_collection_set(result), "never allocate in targeted region");
 779     return result;
 780   } else {
 781     return NULL;
 782   }
 783 }
 784 
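     // Root closure for init-evac: evacuates collection-set objects that have not been
     // copied yet, and updates the root slot to point to the to-space copy.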
 785 class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
 786 private:
 787   ShenandoahHeap* _heap;
 788   Thread* _thread;
 789 public:
 790   ShenandoahEvacuateUpdateRootsClosure() :
 791           _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
 792   }
 793 
 794 private:
 795   template <class T>
 796   void do_oop_work(T* p) {
 797     assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
 798 
 799     T o = oopDesc::load_heap_oop(p);
 800     if (! oopDesc::is_null(o)) {
 801       oop obj = oopDesc::decode_heap_oop_not_null(o);
 802       if (_heap->in_collection_set(obj)) {
 803         assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
 804                _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
 805         oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
 806         if (oopDesc::unsafe_equals(resolved, obj)) {
 807           bool evac;
 808           resolved = _heap->evacuate_object(obj, _thread, evac);
 809         }
 810         oopDesc::encode_store_heap_oop(p, resolved);
 811       }
 812     }
 813   }
 814 
 815 public:
 816   void do_oop(oop* p) {
 817     do_oop_work(p);
 818   }
 819   void do_oop(narrowOop* p) {
 820     do_oop_work(p);
 821   }
 822 };
 823 
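     // Like the closure above, but only evacuates; the root slot itself is left untouched.
     // Used for code cache roots, where oops embedded in nmethods must not be updated
     // non-atomically (see the note in ShenandoahParallelEvacuationTask::work()).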
 824 class ShenandoahEvacuateRootsClosure: public ExtendedOopClosure {
 825 private:
 826   ShenandoahHeap* _heap;
 827   Thread* _thread;
 828 public:
 829   ShenandoahEvacuateRootsClosure() :
 830           _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
 831   }
 832 
 833 private:
 834   template <class T>
 835   void do_oop_work(T* p) {
 836     T o = oopDesc::load_heap_oop(p);
 837     if (! oopDesc::is_null(o)) {
 838       oop obj = oopDesc::decode_heap_oop_not_null(o);
 839       if (_heap->in_collection_set(obj)) {
 840         oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
 841         if (oopDesc::unsafe_equals(resolved, obj)) {
 842           bool evac;
 843           _heap->evacuate_object(obj, _thread, evac);
 844         }
 845       }
 846     }
 847   }
 848 
 849 public:
 850   void do_oop(oop* p) {
 851     do_oop_work(p);
 852   }
 853   void do_oop(narrowOop* p) {
 854     do_oop_work(p);
 855   }
 856 };
 857 
 858 class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
 859 private:
 860   ShenandoahHeap* const _heap;
 861   Thread* const _thread;
 862 public:
 863   ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 864     _heap(heap), _thread(Thread::current()) {}
 865 
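       // Evacuate a live object unless another thread has already copied it, in which
       // case resolving its forwarding pointer no longer yields the object itself.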
 866   void do_object(oop p) {
 867     assert(_heap->is_marked_complete(p), "expect only marked objects");
 868     if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
 869       bool evac;
 870       _heap->evacuate_object(p, _thread, evac);
 871     }
 872   }
 873 };
 874 
 875 class ShenandoahParallelEvacuationTask : public AbstractGangTask {
 876 private:
 877   ShenandoahHeap* const _sh;
 878   ShenandoahCollectionSet* const _cs;
 879   volatile jbyte _claimed_codecache;
 880 
 881   bool claim_codecache() {
 882     jbyte old = Atomic::cmpxchg((jbyte)1, &_claimed_codecache, (jbyte)0);
 883     return old == 0;
 884   }
 885 public:
 886   ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
 887                          ShenandoahCollectionSet* cs) :
 888     AbstractGangTask("Parallel Evacuation Task"),
 889     _sh(sh),
 890     _cs(cs),
 891     _claimed_codecache(0)
 892   {}
 893 
 894   void work(uint worker_id) {
 895 
 896     SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 897 
 898     // If concurrent code cache evac is enabled, evacuate it here.
 899     // Note we cannot update the roots here, because we risk non-atomic stores to live
 900     // nmethods. The update is handled elsewhere.
 901     if (ShenandoahConcurrentEvacCodeRoots && claim_codecache()) {
 902       ShenandoahEvacuateRootsClosure cl;
 903       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 904       CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
 905       CodeCache::blobs_do(&blobs);
 906     }
 907 
 908     ShenandoahParallelEvacuateRegionObjectClosure cl(_sh);
 909     ShenandoahHeapRegion* r;
 910     while ((r =_cs->claim_next()) != NULL) {
 911       log_develop_trace(gc, region)("Thread " INT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
 912                                     worker_id,
 913                                     r->region_number());
 914 
 915       assert(r->has_live(), "all-garbage regions are reclaimed early");
 916       _sh->marked_object_iterate(r, &cl);
 917 
 918       if (_sh->check_cancelled_concgc_and_yield()) {
 919         log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, r->region_number());
 920         break;
 921       }
 922     }
 923   }
 924 };
 925 
 926 void ShenandoahHeap::trash_cset_regions() {
 927   ShenandoahHeapLocker locker(lock());
 928 
 929   ShenandoahCollectionSet* set = collection_set();
 930   ShenandoahHeapRegion* r;
 931   set->clear_current_index();
 932   while ((r = set->next()) != NULL) {
 933     r->make_trash();
 934   }
 935   collection_set()->clear();
 936 }
 937 
 938 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
 939   st->print_cr("Heap Regions:");
 940   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
 941   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
 942   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
 943   st->print_cr("FTS=first use timestamp, LTS=last use timestamp");
 944 
 945   _ordered_regions->print_on(st);
 946 }
 947 
 948 size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
 949   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
 950 
 951   oop humongous_obj = oop(start->bottom() + BrooksPointer::word_size());
 952   size_t size = humongous_obj->size() + BrooksPointer::word_size();
 953   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
 954   size_t index = start->region_number() + required_regions - 1;
 955 
 956   assert(!start->has_live(), "liveness must be zero");
 957   log_trace(gc, humongous)("Reclaiming " SIZE_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words", required_regions, size);
 958 
 959   for(size_t i = 0; i < required_regions; i++) {
 960     // Reclaim from the tail. Otherwise, the assertion fails when printing a region to the trace log,
 961     // because it expects every humongous continuation to still follow an intact humongous start region.
 962     ShenandoahHeapRegion* region = _ordered_regions->get(index--);
 963 
 964     LogTarget(Trace, gc, humongous) lt;
 965     if (lt.is_enabled()) {
 966       ResourceMark rm;
 967       LogStream ls(lt);
 968       region->print_on(&ls);
 969     }
 970 
 971     assert(region->is_humongous(), "expect correct humongous start or continuation");
 972     assert(!in_collection_set(region), "Humongous region should not be in collection set");
 973 
 974     region->make_trash();
 975   }
 976   return required_regions;
 977 }
 978 
 979 #ifdef ASSERT
 980 class ShenandoahCheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
 981   bool heap_region_do(ShenandoahHeapRegion* r) {
 982     assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
 983     return false;
 984   }
 985 };
 986 #endif
 987 
 988 void ShenandoahHeap::prepare_for_concurrent_evacuation() {
 989   assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");
 990 
 991   log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());
 992 
 993   if (!cancelled_concgc()) {
 994     // Allocations might have happened before we STWed here, record peak:
 995     shenandoahPolicy()->record_peak_occupancy();
 996 
 997     make_tlabs_parsable(true);
 998 
 999     if (ShenandoahVerify) {
1000       verifier()->verify_after_concmark();
1001     }
1002 
1003     trash_cset_regions();
1004 
1005     // NOTE: This needs to be done during a stop the world pause, because
1006     // putting regions into the collection set concurrently with Java threads
1007     // will create a race. In particular, acmp could fail because when we
1008     // resolve the first operand, the containing region might not yet be in
1009     // the collection set, and thus return the original oop. When the 2nd
1010     // operand gets resolved, the region could be in the collection set
1011     // and the oop gets evacuated. If both operands were originally
1012     // the same, we get false negatives.
1013 
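         // Under the heap lock, rebuild the collection set and the free set from scratch
         // for the upcoming evacuation.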
1014     {
1015       ShenandoahHeapLocker locker(lock());
1016       _collection_set->clear();
1017       _free_regions->clear();
1018 
1019 #ifdef ASSERT
1020       ShenandoahCheckCollectionSetClosure ccsc;
1021       _ordered_regions->heap_region_iterate(&ccsc);
1022 #endif
1023 
1024       _shenandoah_policy->choose_collection_set(_collection_set);
1025 
1026       _shenandoah_policy->choose_free_set(_free_regions);
1027     }
1028 
1029     _bytes_allocated_since_cm = 0;
1030 
1031     Universe::update_heap_info_at_gc();
1032 
1033     if (ShenandoahVerify) {
1034       verifier()->verify_before_evacuation();
1035     }
1036   }
1037 }
1038 
1039 
1040 class ShenandoahRetireTLABClosure : public ThreadClosure {
1041 private:
1042   bool _retire;
1043 
1044 public:
1045   ShenandoahRetireTLABClosure(bool retire) : _retire(retire) {}
1046 
1047   void do_thread(Thread* thread) {
1048     assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
1049     thread->gclab().make_parsable(_retire);
1050   }
1051 };
1052 
1053 void ShenandoahHeap::make_tlabs_parsable(bool retire_tlabs) {
1054   if (UseTLAB) {
1055     CollectedHeap::ensure_parsability(retire_tlabs);
1056     ShenandoahRetireTLABClosure cl(retire_tlabs);
1057     Threads::java_threads_do(&cl);
1058     gc_threads_do(&cl);
1059   }
1060 }
1061 
1062 
1063 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1064   ShenandoahRootEvacuator* _rp;
1065 public:
1066 
1067   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1068     AbstractGangTask("Shenandoah evacuate and update roots"),
1069     _rp(rp)
1070   {
1071     // Nothing else to do.
1072   }
1073 
1074   void work(uint worker_id) {
1075     ShenandoahEvacuateUpdateRootsClosure cl;
1076 
1077     if (ShenandoahConcurrentEvacCodeRoots) {
1078       _rp->process_evacuate_roots(&cl, NULL, worker_id);
1079     } else {
1080       MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1081       _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1082     }
1083   }
1084 };
1085 
1086 class ShenandoahFixRootsTask : public AbstractGangTask {
1087   ShenandoahRootEvacuator* _rp;
1088 public:
1089 
1090   ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
1091     AbstractGangTask("Shenandoah update roots"),
1092     _rp(rp)
1093   {
1094     // Nothing else to do.
1095   }
1096 
1097   void work(uint worker_id) {
1098     ShenandoahUpdateRefsClosure cl;
1099     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1100 
1101     _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1102   }
1103 };
1104 
1105 void ShenandoahHeap::evacuate_and_update_roots() {
1106 
1107 #if defined(COMPILER2) || INCLUDE_JVMCI
1108   DerivedPointerTable::clear();
1109 #endif
1110   assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
1111 
1112   {
1113     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1114     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1115     workers()->run_task(&roots_task);
1116   }
1117 
1118 #if defined(COMPILER2) || INCLUDE_JVMCI
1119   DerivedPointerTable::update_pointers();
1120 #endif
1121   if (cancelled_concgc()) {
1122     fixup_roots();
1123   }
1124 }
1125 
1126 
1127 void ShenandoahHeap::fixup_roots() {
1128   assert(cancelled_concgc(), "Only after concurrent cycle failed");
1129 
1130   // If initial evacuation has been cancelled, we need to update all references
1131   // after all workers have finished. Otherwise we might run into the following problem:
1132   // GC thread 1 cannot allocate anymore, thus evacuation fails and leaves a from-space ptr of object X.
1133   // GC thread 2 then evacuates the same object X to to-space,
1134   // which leaves a truly dangling from-space reference in the first root oop*. This must not happen.
1135   // clear() and update_pointers() must always be called in pairs,
1136   // and cannot nest with the clear()/update_pointers() calls above.
1137 #if defined(COMPILER2) || INCLUDE_JVMCI
1138   DerivedPointerTable::clear();
1139 #endif
1140   ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1141   ShenandoahFixRootsTask update_roots_task(&rp);
1142   workers()->run_task(&update_roots_task);
1143 #if defined(COMPILER2) || INCLUDE_JVMCI
1144   DerivedPointerTable::update_pointers();
1145 #endif
1146 }
1147 
1148 void ShenandoahHeap::do_evacuation() {
1149   ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
1150 
1151   LogTarget(Trace, gc, region) lt_region;
1152   LogTarget(Trace, gc, cset) lt_cset;
1153 
1154   if (lt_region.is_enabled()) {
1155     ResourceMark rm;
1156     LogStream ls(lt_region);
1157     ls.print_cr("All available regions:");
1158     print_heap_regions_on(&ls);
1159   }
1160 
1161   if (lt_cset.is_enabled()) {
1162     ResourceMark rm;
1163     LogStream ls(lt_cset);
1164     ls.print_cr("Collection set (" SIZE_FORMAT " regions):", _collection_set->count());
1165     _collection_set->print_on(&ls);
1166 
1167     ls.print_cr("Free set:");
1168     _free_regions->print_on(&ls);
1169   }
1170 
1171   ShenandoahParallelEvacuationTask task(this, _collection_set);
1172   workers()->run_task(&task);
1173 
1174   if (lt_cset.is_enabled()) {
1175     ResourceMark rm;
1176     LogStream ls(lt_cset);
1177     ls.print_cr("After evacuation collection set (" SIZE_FORMAT " regions):",
1178                _collection_set->count());
1179     _collection_set->print_on(&ls);
1180 
1181     ls.print_cr("After evacuation free set:");
1182     _free_regions->print_on(&ls);
1183   }
1184 
1185   if (lt_region.is_enabled()) {
1186     ResourceMark rm;
1187     LogStream ls(lt_region);
1188     ls.print_cr("All regions after evacuation:");
1189     print_heap_regions_on(&ls);
1190   }
1191 }
1192 
1193 void ShenandoahHeap::roots_iterate(OopClosure* cl) {
1194   assert(SafepointSynchronize::is_at_safepoint(), "Only iterate roots while world is stopped");
1195 
1196   CodeBlobToOopClosure blobsCl(cl, false);
1197   CLDToOopClosure cldCl(cl);
1198 
1199   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1200   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, 0);
1201 }
1202 
1203 bool ShenandoahHeap::supports_tlab_allocation() const {
1204   return true;
1205 }
1206 
1207 size_t  ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1208   return MIN2(_free_regions->unsafe_peek_free(), max_tlab_size());
1209 }
1210 
1211 size_t ShenandoahHeap::max_tlab_size() const {
1212   return ShenandoahHeapRegion::max_tlab_size_bytes();
1213 }
1214 
1215 class ShenandoahResizeGCLABClosure : public ThreadClosure {
1216 public:
1217   void do_thread(Thread* thread) {
1218     assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
1219     thread->gclab().resize();
1220   }
1221 };
1222 
1223 void ShenandoahHeap::resize_all_tlabs() {
1224   CollectedHeap::resize_all_tlabs();
1225 
1226   ShenandoahResizeGCLABClosure cl;
1227   Threads::java_threads_do(&cl);
1228   gc_threads_do(&cl);
1229 }
1230 
1231 class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure {
1232 public:
1233   void do_thread(Thread* thread) {
1234     assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
1235     thread->gclab().accumulate_statistics();
1236     thread->gclab().initialize_statistics();
1237   }
1238 };
1239 
1240 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1241   ShenandoahAccumulateStatisticsGCLABClosure cl;
1242   Threads::java_threads_do(&cl);
1243   gc_threads_do(&cl);
1244 }
1245 
1246 bool  ShenandoahHeap::can_elide_tlab_store_barriers() const {
1247   return true;
1248 }
1249 
1250 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1251   // Overridden to do nothing.
1252   return new_obj;
1253 }
1254 
1255 bool  ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1256   return true;
1257 }
1258 
1259 bool ShenandoahHeap::card_mark_must_follow_store() const {
1260   return false;
1261 }
1262 
1263 void ShenandoahHeap::collect(GCCause::Cause cause) {
1264   assert(cause != GCCause::_gc_locker, "no JNI critical callback");
1265   if (GCCause::is_user_requested_gc(cause)) {
1266     if (!DisableExplicitGC) {
1267       if (ExplicitGCInvokesConcurrent) {
1268         _concurrent_gc_thread->do_conc_gc();
1269       } else {
1270         _concurrent_gc_thread->do_full_gc(cause);
1271       }
1272     }
1273   } else if (cause == GCCause::_allocation_failure) {
1274     collector_policy()->set_should_clear_all_soft_refs(true);
1275     _concurrent_gc_thread->do_full_gc(cause);
1276   }
1277 }
1278 
1279 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1280   //assert(false, "Shouldn't need to do full collections");
1281 }
1282 
1283 AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
1284   Unimplemented();
1285   return NULL;
1286 
1287 }
1288 
1289 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1290   return _shenandoah_policy;
1291 }
1292 
1293 
1294 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1295   Space* sp = heap_region_containing(addr);
1296   if (sp != NULL) {
1297     return sp->block_start(addr);
1298   }
1299   return NULL;
1300 }
1301 
1302 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1303   Space* sp = heap_region_containing(addr);
1304   assert(sp != NULL, "block_size of address outside of heap");
1305   return sp->block_size(addr);
1306 }
1307 
1308 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1309   Space* sp = heap_region_containing(addr);
1310   return sp->block_is_obj(addr);
1311 }
1312 
1313 jlong ShenandoahHeap::millis_since_last_gc() {
1314   return 0;
1315 }
1316 
1317 void ShenandoahHeap::prepare_for_verify() {
1318   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1319     make_tlabs_parsable(false);
1320   }
1321 }
1322 
1323 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1324   workers()->print_worker_threads_on(st);
1325 }
1326 
1327 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1328   workers()->threads_do(tcl);
1329 }
1330 
1331 void ShenandoahHeap::print_tracing_info() const {
1332   LogTarget(Info, gc, stats) lt;
1333   if (lt.is_enabled()) {
1334     ResourceMark rm;
1335     LogStream ls(lt);
1336 
1337     phase_timings()->print_on(&ls);
1338 
1339     ls.cr();
1340     ls.cr();
1341 
1342     shenandoahPolicy()->print_gc_stats(&ls);
1343 
1344     ls.cr();
1345     ls.cr();
1346 
1347     if (ShenandoahAllocationTrace) {
1348       assert(alloc_tracker() != NULL, "Must be");
1349       alloc_tracker()->print_on(&ls);
1350     } else {
1351       ls.print_cr("  Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
1352     }
1353   }
1354 }
1355 
1356 void ShenandoahHeap::verify(VerifyOption vo) {
1357   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1358     if (ShenandoahVerify) {
1359       verifier()->verify_generic(vo);
1360     } else {
1361       // TODO: Consider allocating verification bitmaps on demand,
1362       // and turn this on unconditionally.
1363     }
1364   }
1365 }
1366 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1367   return _free_regions->capacity();
1368 }
1369 
1370 class ObjectIterateScanRootClosure : public ExtendedOopClosure {
1371 private:
1372   MarkBitMap* _bitmap;
1373   Stack<oop,mtGC>* _oop_stack;
1374 
1375   template <class T>
1376   void do_oop_work(T* p) {
1377     T o = oopDesc::load_heap_oop(p);
1378     if (!oopDesc::is_null(o)) {
1379       oop obj = oopDesc::decode_heap_oop_not_null(o);
1380       obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
1381       assert(oopDesc::is_oop(obj), "must be a valid oop");
1382       if (!_bitmap->isMarked((HeapWord*) obj)) {
1383         _bitmap->mark((HeapWord*) obj);
1384         _oop_stack->push(obj);
1385       }
1386     }
1387   }
1388 public:
1389   ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1390     _bitmap(bitmap), _oop_stack(oop_stack) {}
1391   void do_oop(oop* p)       { do_oop_work(p); }
1392   void do_oop(narrowOop* p) { do_oop_work(p); }
1393 };
1394 
1395 /*
1396  * This is public API, used in preparation of object_iterate().
1397  * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1398  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1399  * control, we call SH::make_tlabs_parsable().
1400  */
1401 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1402   // No-op.
1403 }
1404 
1405 /*
1406  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1407  *
1408  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1409  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1410  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1411  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1412  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1413  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1414  * wiped the bitmap in preparation for next marking).
1415  *
1416  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1417  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1418  * is allowed to report dead objects, but is not required to do so.
1419  */
1420 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1421   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1422   if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1423     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1424     return;
1425   }
1426 
1427   Stack<oop,mtGC> oop_stack;
1428 
1429   // First, we process all GC roots. This populates the work stack with initial objects.
1430   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1431   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1432   CLDToOopClosure clds(&oops, false);
1433   CodeBlobToOopClosure blobs(&oops, false);
1434   rp.process_all_roots(&oops, &oops, &clds, &blobs, 0);
1435 
1436   // Work through the oop stack to traverse heap.
1437   while (! oop_stack.is_empty()) {
1438     oop obj = oop_stack.pop();
1439     assert(oopDesc::is_oop(obj), "must be a valid oop");
1440     cl->do_object(obj);
1441     obj->oop_iterate(&oops);
1442   }
1443 
1444   assert(oop_stack.is_empty(), "should be empty");
1445 
1446   if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1447     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1448   }
1449 }
1450 
1451 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1452   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1453   object_iterate(cl);
1454 }
1455 
// Apply blk->heap_region_do() on all heap regions in address order, optionally skipping
// collection set regions and humongous continuations; terminate the iteration early if
// heap_region_do() returns true.
1458 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions, bool skip_humongous_continuation) const {
1459   for (size_t i = 0; i < num_regions(); i++) {
1460     ShenandoahHeapRegion* current  = _ordered_regions->get(i);
1461     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1462       continue;
1463     }
1464     if (skip_cset_regions && in_collection_set(current)) {
1465       continue;
1466     }
1467     if (blk->heap_region_do(current)) {
1468       return;
1469     }
1470   }
1471 }
1472 
1473 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1474 private:
1475   ShenandoahHeap* sh;
1476 public:
1477   ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}
1478 
1479   bool heap_region_do(ShenandoahHeapRegion* r) {
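    // Reset the per-region liveness data and move the next top-at-mark-start up to
    // the current top, so that objects allocated from here on (above TAMS) are
    // treated as implicitly live by the upcoming marking cycle.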
1480     r->clear_live_data();
1481     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1482     return false;
1483   }
1484 };
1485 
1486 void ShenandoahHeap::start_concurrent_marking() {
1487   if (ShenandoahVerify) {
1488     verifier()->verify_before_concmark();
1489   }
1490 
1491   {
1492     ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1493     accumulate_statistics_all_tlabs();
1494   }
1495 
1496   set_concurrent_mark_in_progress(true);
1497   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1498   if (UseTLAB) {
1499     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1500     make_tlabs_parsable(true);
1501   }
1502 
1503   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1504   _used_start_gc = used();
1505 
1506   {
1507     ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1508     ShenandoahClearLivenessClosure clc(this);
1509     heap_region_iterate(&clc);
1510   }
1511 
  // Make the above changes visible to worker threads
1513   OrderAccess::fence();
1514 
1515   concurrentMark()->init_mark_roots();
1516 
1517   if (UseTLAB) {
1518     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1519     resize_all_tlabs();
1520   }
1521 }
1522 
1523 void ShenandoahHeap::swap_mark_bitmaps() {
1524   // Swap bitmaps.
1525   MarkBitMap* tmp1 = _complete_mark_bit_map;
1526   _complete_mark_bit_map = _next_mark_bit_map;
1527   _next_mark_bit_map = tmp1;
1528 
1529   // Swap top-at-mark-start pointers
1530   HeapWord** tmp2 = _complete_top_at_mark_starts;
1531   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1532   _next_top_at_mark_starts = tmp2;
1533 
1534   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1535   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1536   _next_top_at_mark_starts_base = tmp3;
1537 }
1538 
1539 
1540 void ShenandoahHeap::stop_concurrent_marking() {
1541   assert(concurrent_mark_in_progress(), "How else could we get here?");
1542   if (! cancelled_concgc()) {
    // Marking completed without cancellation: clear the pending update-refs request and
    // swap in the freshly completed marking bitmap. If marking had been cancelled, the
    // need-update-refs flag is left set so that reference updating can be finished later.
1545     set_need_update_refs(false);
1546     swap_mark_bitmaps();
1547   }
1548   set_concurrent_mark_in_progress(false);
1549 
1550   LogTarget(Trace, gc, region) lt;
1551   if (lt.is_enabled()) {
1552     ResourceMark rm;
1553     LogStream ls(lt);
1554     ls.print_cr("Regions at stopping the concurrent mark:");
1555     print_heap_regions_on(&ls);
1556   }
1557 }
1558 
1559 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
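  // Record the new state and (de)activate the SATB pre-barrier queues on all Java
  // threads, so mutators record overwritten references only while marking runs.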
1560   _concurrent_mark_in_progress = in_progress ? 1 : 0;
1561   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1562 }
1563 
1564 void ShenandoahHeap::set_concurrent_partial_in_progress(bool in_progress) {
1565   _concurrent_partial_in_progress = in_progress;
1566   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1567   set_evacuation_in_progress_at_safepoint(in_progress);
1568 }
1569 
1570 void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
1571   // Note: it is important to first release the _evacuation_in_progress flag here,
1572   // so that Java threads can get out of oom_during_evacuation() and reach a safepoint,
1573   // in case a VM task is pending.
1574   set_evacuation_in_progress(in_progress);
1575   MutexLocker mu(Threads_lock);
1576   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
1577 }
1578 
1579 void ShenandoahHeap::set_evacuation_in_progress_at_safepoint(bool in_progress) {
1580   assert(SafepointSynchronize::is_at_safepoint(), "Only call this at safepoint");
1581   set_evacuation_in_progress(in_progress);
1582   JavaThread::set_evacuation_in_progress_all_threads(in_progress);
1583 }
1584 
1585 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1586   _evacuation_in_progress = in_progress ? 1 : 0;
1587   OrderAccess::fence();
1588 }
1589 
1590 void ShenandoahHeap::oom_during_evacuation() {
1591   log_develop_trace(gc)("Out of memory during evacuation, cancel evacuation, schedule full GC by thread %d",
1592                         Thread::current()->osthread()->thread_id());
1593 
  // We ran out of memory during evacuation. Cancel evacuation and schedule a full GC.
1595   collector_policy()->set_should_clear_all_soft_refs(true);
1596   concurrent_thread()->try_set_full_gc();
1597   cancel_concgc(_oom_evacuation);
1598 
1599   if ((! Thread::current()->is_GC_task_thread()) && (! Thread::current()->is_ConcurrentGC_thread())) {
1600     assert(! Threads_lock->owned_by_self()
1601            || SafepointSynchronize::is_at_safepoint(), "must not hold Threads_lock here");
1602     log_warning(gc)("OOM during evacuation. Let Java thread wait until evacuation finishes.");
1603     while (_evacuation_in_progress) { // wait.
1604       Thread::current()->_ParkEvent->park(1);
1605     }
1606   }
1607 
1608 }
1609 
1610 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1611   // Initialize Brooks pointer for the next object
1612   HeapWord* result = obj + BrooksPointer::word_size();
1613   BrooksPointer::initialize(oop(result));
1614   return result;
1615 }
1616 
1617 uint ShenandoahHeap::oop_extra_words() {
1618   return BrooksPointer::word_size();
1619 }
1620 
1621 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1622   _heap(ShenandoahHeap::heap_no_check()) {
1623 }
1624 
1625 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
1626   assert(_heap != NULL, "sanity");
1627   obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
1628 #ifdef ASSERT
1629   if (_heap->concurrent_mark_in_progress()) {
1630     assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
1631   }
1632 #endif
1633   assert(!oopDesc::is_null(obj), "null");
1634   return _heap->is_marked_next(obj);
1635 }
1636 
1637 ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
1638   _heap(ShenandoahHeap::heap_no_check()) {
1639 }
1640 
1641 bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
1642   assert(_heap != NULL, "sanity");
1643   assert(!oopDesc::is_null(obj), "null");
1644   assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
1645   return _heap->is_marked_next(obj);
1646 }
1647 
1648 BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
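  // While references still need updating, liveness queries may see from-space copies,
  // so use the closure that resolves the forwarding pointer before checking the mark.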
1649   return need_update_refs() ?
1650          (BoolObjectClosure*) &_forwarded_is_alive :
1651          (BoolObjectClosure*) &_is_alive;
1652 }
1653 
1654 void ShenandoahHeap::ref_processing_init() {
1655   MemRegion mr = reserved_region();
1656 
1657   _forwarded_is_alive.init(ShenandoahHeap::heap());
1658   _is_alive.init(ShenandoahHeap::heap());
1659   assert(_max_workers > 0, "Sanity");
1660 
1661   _ref_processor =
1662     new ReferenceProcessor(mr,    // span
1663                            ParallelRefProcEnabled,  // MT processing
1664                            _max_workers,            // Degree of MT processing
1665                            true,                    // MT discovery
1666                            _max_workers,            // Degree of MT discovery
1667                            false,                   // Reference discovery is not atomic
1668                            &_forwarded_is_alive);   // Pessimistically assume "forwarded"
1669 }
1670 
1671 
1672 GCTracer* ShenandoahHeap::tracer() {
1673   return shenandoahPolicy()->tracer();
1674 }
1675 
1676 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1677   return _free_regions->used();
1678 }
1679 
1680 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
1681   if (try_cancel_concgc()) {
1682     log_info(gc)("Cancelling concurrent GC: %s", GCCause::to_string(cause));
1683     _shenandoah_policy->report_concgc_cancelled();
1684   }
1685 }
1686 
1687 void ShenandoahHeap::cancel_concgc(ShenandoahCancelCause cause) {
1688   if (try_cancel_concgc()) {
1689     log_info(gc)("Cancelling concurrent GC: %s", cancel_cause_to_string(cause));
1690     _shenandoah_policy->report_concgc_cancelled();
1691   }
1692 }
1693 
1694 const char* ShenandoahHeap::cancel_cause_to_string(ShenandoahCancelCause cause) {
1695   switch (cause) {
1696     case _oom_evacuation:
1697       return "Out of memory for evacuation";
1698     case _vm_stop:
1699       return "Stopping VM";
1700     default:
1701       return "Unknown";
1702   }
1703 }
1704 
1705 uint ShenandoahHeap::max_workers() {
1706   return _max_workers;
1707 }
1708 
1709 void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate even when GC is still running.
1711 
1712   // Step 0. Notify policy to disable event recording.
1713   _shenandoah_policy->record_shutdown();
1714 
1715   // Step 1. Notify control thread that we are in shutdown.
1716   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1717   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1718   _concurrent_gc_thread->prepare_for_graceful_shutdown();
1719 
1720   // Step 2. Notify GC workers that we are cancelling GC.
1721   cancel_concgc(_vm_stop);
1722 
  // Step 3. Wait until the GC worker thread exits normally.
1724   _concurrent_gc_thread->stop();
1725 }
1726 
1727 void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
1728   ShenandoahPhaseTimings::Phase phase_root =
1729           full_gc ?
1730           ShenandoahPhaseTimings::full_gc_purge :
1731           ShenandoahPhaseTimings::purge;
1732 
1733   ShenandoahPhaseTimings::Phase phase_unload =
1734           full_gc ?
1735           ShenandoahPhaseTimings::full_gc_purge_class_unload :
1736           ShenandoahPhaseTimings::purge_class_unload;
1737 
1738   ShenandoahPhaseTimings::Phase phase_cldg =
1739           full_gc ?
1740           ShenandoahPhaseTimings::full_gc_purge_cldg :
1741           ShenandoahPhaseTimings::purge_cldg;
1742 
1743   ShenandoahPhaseTimings::Phase phase_par =
1744           full_gc ?
1745           ShenandoahPhaseTimings::full_gc_purge_par :
1746           ShenandoahPhaseTimings::purge_par;
1747 
1748   ShenandoahPhaseTimings::Phase phase_par_classes =
1749           full_gc ?
1750           ShenandoahPhaseTimings::full_gc_purge_par_classes :
1751           ShenandoahPhaseTimings::purge_par_classes;
1752 
1753   ShenandoahPhaseTimings::Phase phase_par_codecache =
1754           full_gc ?
1755           ShenandoahPhaseTimings::full_gc_purge_par_codecache :
1756           ShenandoahPhaseTimings::purge_par_codecache;
1757 
1758   ShenandoahPhaseTimings::Phase phase_par_rmt =
1759           full_gc ?
1760           ShenandoahPhaseTimings::full_gc_purge_par_rmt :
1761           ShenandoahPhaseTimings::purge_par_rmt;
1762 
1763   ShenandoahPhaseTimings::Phase phase_par_symbstring =
1764           full_gc ?
1765           ShenandoahPhaseTimings::full_gc_purge_par_symbstring :
1766           ShenandoahPhaseTimings::purge_par_symbstring;
1767 
1768   ShenandoahPhaseTimings::Phase phase_par_sync =
1769           full_gc ?
1770           ShenandoahPhaseTimings::full_gc_purge_par_sync :
1771           ShenandoahPhaseTimings::purge_par_sync;
1772 
1773   ShenandoahGCPhase root_phase(phase_root);
1774 
1775   BoolObjectClosure* is_alive = is_alive_closure();
1776 
1777   bool purged_class;
1778 
1779   // Unload classes and purge SystemDictionary.
1780   {
1781     ShenandoahGCPhase phase(phase_unload);
1782     purged_class = SystemDictionary::do_unloading(is_alive,
1783                                                   full_gc ? ShenandoahMarkCompact::gc_timer() : gc_timer(),
1784                                                   true);
1785   }
1786 
1787   {
1788     ShenandoahGCPhase phase(phase_par);
1789     uint active = _workers->active_workers();
1790     ParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class);
1791     _workers->run_task(&unlink_task);
1792 
1793     ShenandoahPhaseTimings* p = ShenandoahHeap::heap()->phase_timings();
1794     ParallelCleaningTimes times = unlink_task.times();
1795 
1796     // "times" report total time, phase_tables_cc reports wall time. Divide total times
1797     // by active workers to get average time per worker, that would add up to wall time.
1798     p->record_phase_time(phase_par_classes,    times.klass_work_us() / active);
1799     p->record_phase_time(phase_par_codecache,  times.codecache_work_us() / active);
1800     p->record_phase_time(phase_par_rmt,        times.rmt_work_us() / active);
1801     p->record_phase_time(phase_par_symbstring, times.tables_work_us() / active);
1802     p->record_phase_time(phase_par_sync,       times.sync_us() / active);
1803   }
1804 
1805   {
1806     ShenandoahGCPhase phase(phase_cldg);
1807     ClassLoaderDataGraph::purge();
1808   }
1809 }
1810 
1811 void ShenandoahHeap::set_need_update_refs(bool need_update_refs) {
1812   _need_update_refs = need_update_refs;
1813 }
1814 
// FIXME: This should live in ShenandoahHeapRegionSet.
1816 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
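  // Walk forward from the given region to find the next candidate for compaction,
  // skipping humongous regions.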
1817   size_t region_idx = r->region_number() + 1;
1818   ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
1819   guarantee(next->region_number() == region_idx, "region number must match");
1820   while (next->is_humongous()) {
1821     region_idx = next->region_number() + 1;
1822     next = _ordered_regions->get(region_idx);
1823     guarantee(next->region_number() == region_idx, "region number must match");
1824   }
1825   return next;
1826 }
1827 
1828 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
1829   return _monitoring_support;
1830 }
1831 
1832 MarkBitMap* ShenandoahHeap::complete_mark_bit_map() {
1833   return _complete_mark_bit_map;
1834 }
1835 
1836 MarkBitMap* ShenandoahHeap::next_mark_bit_map() {
1837   return _next_mark_bit_map;
1838 }
1839 
1840 void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
1841   _free_regions->add_region(r);
1842 }
1843 
1844 void ShenandoahHeap::clear_free_regions() {
1845   _free_regions->clear();
1846 }
1847 
1848 address ShenandoahHeap::in_cset_fast_test_addr() {
1849   ShenandoahHeap* heap = ShenandoahHeap::heap();
1850   assert(heap->collection_set() != NULL, "Sanity");
1851   return (address) heap->collection_set()->biased_map_address();
1852 }
1853 
1854 address ShenandoahHeap::cancelled_concgc_addr() {
1855   return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
1856 }
1857 
1858 
1859 size_t ShenandoahHeap::conservative_max_heap_alignment() {
1860   return ShenandoahMaxRegionSize;
1861 }
1862 
1863 size_t ShenandoahHeap::bytes_allocated_since_cm() {
1864   return _bytes_allocated_since_cm;
1865 }
1866 
1867 void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
1868   _bytes_allocated_since_cm = bytes;
1869 }
1870 
1871 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
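  // The top-at-mark-start array is biased so that shifting the absolute region base
  // address by the region size shift yields the index directly.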
1872   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
1873   _next_top_at_mark_starts[index] = addr;
1874 }
1875 
1876 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
1877   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
1878   return _next_top_at_mark_starts[index];
1879 }
1880 
1881 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
1882   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
1883   _complete_top_at_mark_starts[index] = addr;
1884 }
1885 
1886 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
1887   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
1888   return _complete_top_at_mark_starts[index];
1889 }
1890 
1891 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
1892   _full_gc_in_progress = in_progress;
1893 }
1894 
1895 bool ShenandoahHeap::is_full_gc_in_progress() const {
1896   return _full_gc_in_progress;
1897 }
1898 
1899 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
1900   _update_refs_in_progress = in_progress;
1901 }
1902 
1903 bool ShenandoahHeap::is_update_refs_in_progress() const {
1904   return _update_refs_in_progress;
1905 }
1906 
1907 void ShenandoahHeap::register_nmethod(nmethod* nm) {
1908   ShenandoahCodeRoots::add_nmethod(nm);
1909 }
1910 
1911 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
1912   ShenandoahCodeRoots::remove_nmethod(nm);
1913 }
1914 
1915 void ShenandoahHeap::pin_object(oop o) {
1916   ShenandoahHeapLocker locker(lock());
1917   heap_region_containing(o)->make_pinned();
1918 }
1919 
1920 void ShenandoahHeap::unpin_object(oop o) {
1921   ShenandoahHeapLocker locker(lock());
1922   heap_region_containing(o)->make_unpinned();
1923 }
1924 
1925 GCTimer* ShenandoahHeap::gc_timer() const {
1926   return _gc_timer;
1927 }
1928 
1929 #ifdef ASSERT
1930 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
1931   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
1932 
1933   if (SafepointSynchronize::is_at_safepoint()) {
1934     if (UseDynamicNumberOfGCThreads ||
1935         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
1936       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
1937     } else {
1938       // Use ParallelGCThreads inside safepoints
      assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
1940     }
1941   } else {
1942     if (UseDynamicNumberOfGCThreads ||
1943         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
1944       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
1945     } else {
1946       // Use ConcGCThreads outside safepoints
1947       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
1948     }
1949   }
1950 }
1951 #endif
1952 
1953 class ShenandoahCountGarbageClosure : public ShenandoahHeapRegionClosure {
1954 private:
1955   size_t            _garbage;
1956 public:
1957   ShenandoahCountGarbageClosure() : _garbage(0) {
1958   }
1959 
1960   bool heap_region_do(ShenandoahHeapRegion* r) {
1961     if (r->is_regular()) {
1962       _garbage += r->garbage();
1963     }
1964     return false;
1965   }
1966 
1967   size_t garbage() {
1968     return _garbage;
1969   }
1970 };
1971 
1972 size_t ShenandoahHeap::garbage() {
1973   ShenandoahCountGarbageClosure cl;
1974   heap_region_iterate(&cl);
1975   return cl.garbage();
1976 }
1977 
1978 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() const {
1979   return _connection_matrix;
1980 }
1981 
1982 ShenandoahPartialGC* ShenandoahHeap::partial_gc() {
1983   return _partial_gc;
1984 }
1985 
1986 ShenandoahVerifier* ShenandoahHeap::verifier() {
1987   guarantee(ShenandoahVerify, "Should be enabled");
1988   assert (_verifier != NULL, "sanity");
1989   return _verifier;
1990 }
1991 
1992 template<class T>
1993 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
1994 private:
1995   T cl;
1996   ShenandoahHeap* _heap;
1997   ShenandoahHeapRegionSet* _regions;
1998   bool _concurrent;
1999 public:
2000   ShenandoahUpdateHeapRefsTask(ShenandoahHeapRegionSet* regions, bool concurrent) :
2001     AbstractGangTask("Concurrent Update References Task"),
2002     cl(T()),
2003     _heap(ShenandoahHeap::heap()),
2004     _regions(regions),
2005     _concurrent(concurrent) {
2006   }
2007 
2008   void work(uint worker_id) {
2009     SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
2010     ShenandoahHeapRegion* r = _regions->claim_next();
2011     while (r != NULL) {
2012       if (_heap->in_collection_set(r)) {
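        // Collection set regions need no reference updates; just clear their portion
        // of the complete mark bitmap (up to the complete top-at-mark-start).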
2013         HeapWord* bottom = r->bottom();
2014         HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
2015         if (top > bottom) {
2016           _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
2017         }
2018       } else {
2019         if (r->is_active()) {
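          // Update the reference fields of all marked objects in this region so that
          // they point at the to-space copies.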
2020           _heap->marked_object_oop_safe_iterate(r, &cl);
2021         }
2022       }
2023       if (_heap->check_cancelled_concgc_and_yield(_concurrent)) {
2024         return;
2025       }
2026       r = _regions->claim_next();
2027     }
2028   }
2029 };
2030 
2031 void ShenandoahHeap::update_heap_references(ShenandoahHeapRegionSet* update_regions, bool concurrent) {
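  // With the connection matrix enabled, use the closure variant that also records
  // inter-region references while updating them.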
2032   if (UseShenandoahMatrix) {
2033     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(update_regions, concurrent);
2034     workers()->run_task(&task);
2035   } else {
2036     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(update_regions, concurrent);
2037     workers()->run_task(&task);
2038   }
2039 }
2040 
2041 void ShenandoahHeap::concurrent_update_heap_references() {
2042   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
2043   ShenandoahHeapRegionSet* update_regions = regions();
2044   update_regions->clear_current_index();
2045   update_heap_references(update_regions, true);
2046 }
2047 
2048 void ShenandoahHeap::prepare_update_refs() {
2049   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2050 
2051   if (ShenandoahVerify) {
2052     verifier()->verify_before_updaterefs();
2053   }
2054 
2055   set_evacuation_in_progress_at_safepoint(false);
2056   set_update_refs_in_progress(true);
2057   make_tlabs_parsable(true);
2058   if (UseShenandoahMatrix) {
2059     connection_matrix()->clear_all();
2060   }
2061   for (uint i = 0; i < num_regions(); i++) {
2062     ShenandoahHeapRegion* r = _ordered_regions->get(i);
2063     r->set_concurrent_iteration_safe_limit(r->top());
2064   }
2065 }
2066 
2067 void ShenandoahHeap::finish_update_refs() {
2068   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2069 
2070   if (cancelled_concgc()) {
2071     ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
2072 
2073     // Finish updating references where we left off.
2074     clear_cancelled_concgc();
2075     ShenandoahHeapRegionSet* update_regions = regions();
2076     update_heap_references(update_regions, false);
2077   }
2078 
2079   assert(! cancelled_concgc(), "Should have been done right before");
2080   concurrentMark()->update_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2081 
2082   if (ShenandoahStringDedup::is_enabled()) {
2083     ShenandoahGCPhase final_str_dedup_table(ShenandoahPhaseTimings::final_update_refs_dedup_table);
2084     ShenandoahStringDedup::parallel_update_or_unlink();
2085   }
2086 
  // Allocations might have happened before we stopped the world here; record the peak occupancy:
2088   shenandoahPolicy()->record_peak_occupancy();
2089 
2090   ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
2091 
2092   trash_cset_regions();
2093   set_need_update_refs(false);
2094 
2095   if (ShenandoahVerify) {
2096     verifier()->verify_after_updaterefs();
2097   }
2098 
2099   {
2100     // Rebuild the free set
2101     ShenandoahHeapLocker locker(lock());
2102     _free_regions->clear();
2103     size_t end = _ordered_regions->active_regions();
2104     for (size_t i = 0; i < end; i++) {
2105       ShenandoahHeapRegion* r = _ordered_regions->get(i);
2106       if (r->is_alloc_allowed()) {
2107         assert (!in_collection_set(r), "collection set should be clear");
2108         _free_regions->add_region(r);
2109       }
2110     }
2111   }
2112   set_update_refs_in_progress(false);
2113 }
2114 
2115 void ShenandoahHeap::set_alloc_seq_gc_start() {
  // Take the next number; the start seq number is inclusive
2117   _alloc_seq_at_last_gc_start = ShenandoahHeapRegion::alloc_seq_num() + 1;
2118 }
2119 
2120 void ShenandoahHeap::set_alloc_seq_gc_end() {
  // Take the current number; the end seq number is also inclusive
2122   _alloc_seq_at_last_gc_end = ShenandoahHeapRegion::alloc_seq_num();
2123 }
2124 
2125 
2126 #ifdef ASSERT
2127 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2128   _lock.assert_owned_by_current_thread();
2129 }
2130 
2131 void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
2132   _lock.assert_not_owned_by_current_thread();
2133 }
2134 
2135 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2136   _lock.assert_owned_by_current_thread_or_safepoint();
2137 }
2138 #endif
2139 
2140 void ShenandoahHeap::recycle_trash_assist(size_t limit) {
2141   assert_heaplock_owned_by_current_thread();
2142 
2143   size_t count = 0;
2144   for (size_t i = 0; (i < num_regions()) && (count < limit); i++) {
2145     ShenandoahHeapRegion *r = _ordered_regions->get(i);
2146     if (r->is_trash()) {
2147       decrease_used(r->used());
2148       r->recycle();
2149       _free_regions->add_region(r);
2150       count++;
2151     }
2152   }
2153 }
2154 
2155 void ShenandoahHeap::recycle_trash() {
  // The heap lock is not reentrant; check that we do not already hold it
2157   assert_heaplock_not_owned_by_current_thread();
2158 
2159   size_t bytes_reclaimed = 0;
2160 
2161   for (size_t i = 0; i < num_regions(); i++) {
2162     ShenandoahHeapRegion* r = _ordered_regions->get(i);
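    // Check the cheap is_trash() flag first without the lock, then re-check under the
    // heap lock before recycling, so allocators are not blocked for the whole pass.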
2163     if (r->is_trash()) {
2164       ShenandoahHeapLocker locker(lock());
2165       if (r->is_trash()) {
2166         bytes_reclaimed += r->used();
2167         decrease_used(r->used());
2168         r->recycle();
2169         _free_regions->add_region(r);
2170       }
2171     }
2172     SpinPause(); // allow allocators to take the lock
2173   }
2174 
2175   _shenandoah_policy->record_bytes_reclaimed(bytes_reclaimed);
2176 }
2177 
2178 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2179   print_on(st);
2180   print_heap_regions_on(st);
2181 }
2182 
2183 address ShenandoahHeap::concurrent_mark_in_progress_addr() {
2184   return (address) &(ShenandoahHeap::heap()->_concurrent_mark_in_progress);
2185 }
2186 
2187 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
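  // A single bitmap slice backs _bitmap_regions_per_slice consecutive regions; the
  // slice counts as committed if any region in that group is committed.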
2188   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2189 
2190   size_t regions_from = _bitmap_regions_per_slice * slice;
2191   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2192   for (size_t g = regions_from; g < regions_to; g++) {
2193     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2194     if (skip_self && g == r->region_number()) continue;
2195     if (_ordered_regions->get(g)->is_committed()) {
2196       return true;
2197     }
2198   }
2199   return false;
2200 }
2201 
2202 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2203   assert_heaplock_owned_by_current_thread();
2204 
2205   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed as well; exit right away.
2208     return true;
2209   }
2210 
2211   // Commit the bitmap slice:
2212   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2213   size_t off = _bitmap_bytes_per_slice * slice;
2214   size_t len = _bitmap_bytes_per_slice;
2215   if (!os::commit_memory((char*)_bitmap0_region.start() + off, len, false)) {
2216     return false;
2217   }
2218   if (!os::commit_memory((char*)_bitmap1_region.start() + off, len, false)) {
2219     return false;
2220   }
2221   return true;
2222 }
2223 
2224 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2225   assert_heaplock_owned_by_current_thread();
2226 
2227   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
2230     return true;
2231   }
2232 
2233   // Uncommit the bitmap slice:
2234   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2235   size_t off = _bitmap_bytes_per_slice * slice;
2236   size_t len = _bitmap_bytes_per_slice;
2237   if (!os::uncommit_memory((char*)_bitmap0_region.start() + off, len)) {
2238     return false;
2239   }
2240   if (!os::uncommit_memory((char*)_bitmap1_region.start() + off, len)) {
2241     return false;
2242   }
2243   return true;
2244 }