
src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp

rev 11156 : [backport] 8220350: Refactor ShenandoahHeap::initialize
Reviewed-by: rkennke, zgu
rev 11157 : [backport] 8220153: Shenandoah does not work with TransparentHugePages properly
Reviewed-by: rkennke, zgu
rev 11158 : [backport] 8220162: Shenandoah should not commit HugeTLBFS memory
Reviewed-by: rkennke, zgu

*** 77,104 ****
  void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
  void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
  #endif
  
! class ShenandoahPretouchTask : public AbstractGangTask {
  private:
    ShenandoahRegionIterator _regions;
-   const size_t _bitmap_size;
    const size_t _page_size;
-   char* _bitmap_base;
  public:
!   ShenandoahPretouchTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
!     AbstractGangTask("Shenandoah PreTouch"),
!     _bitmap_size(bitmap_size),
!     _page_size(page_size),
!     _bitmap_base(bitmap_base) {}
  
    virtual void work(uint worker_id) {
      ShenandoahHeapRegion* r = _regions.next();
      while (r != NULL) {
        os::pretouch_memory((char*) r->bottom(), (char*) r->end());
  
        size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
        size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
        assert (end <= _bitmap_size, err_msg("end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size));
  
        os::pretouch_memory(_bitmap_base + start, _bitmap_base + end);
--- 77,120 ----
  void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
  void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
  #endif
  
! class ShenandoahPretouchHeapTask : public AbstractGangTask {
  private:
    ShenandoahRegionIterator _regions;
    const size_t _page_size;
  public:
!   ShenandoahPretouchHeapTask(size_t page_size) :
!     AbstractGangTask("Shenandoah Pretouch Heap"),
!     _page_size(page_size) {}
  
    virtual void work(uint worker_id) {
      ShenandoahHeapRegion* r = _regions.next();
      while (r != NULL) {
        os::pretouch_memory((char*) r->bottom(), (char*) r->end());
+       r = _regions.next();
+     }
+   }
+ };
+ 
+ class ShenandoahPretouchBitmapTask : public AbstractGangTask {
+ private:
+   ShenandoahRegionIterator _regions;
+   char* _bitmap_base;
+   const size_t _bitmap_size;
+   const size_t _page_size;
+ public:
+   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
+     AbstractGangTask("Shenandoah Pretouch Bitmap"),
+     _bitmap_base(bitmap_base),
+     _bitmap_size(bitmap_size),
+     _page_size(page_size) {}
  
+   virtual void work(uint worker_id) {
+     ShenandoahHeapRegion* r = _regions.next();
+     while (r != NULL) {
        size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
        size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
        assert (end <= _bitmap_size, err_msg("end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size));
  
        os::pretouch_memory(_bitmap_base + start, _bitmap_base + end);
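Note on the primitive both tasks lean on: os::pretouch_memory conceptually writes one byte per page so the OS backs the range with real memory before GC workers first race over it. A minimal standalone sketch of that idea (pretouch_range and its loop are illustrative, not the HotSpot implementation):

    #include <cstddef>

    // Touch one byte per page so the kernel maps memory behind
    // [start, end) up front instead of faulting on first GC access.
    static void pretouch_range(char* start, char* end, size_t page_size) {
      for (volatile char* p = start; p < end; p += page_size) {
        *p = *p;   // benign read-modify-write forces the page to be resident
      }
    }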
*** 113,193 ****
    ShenandoahBrooksPointer::initial_checks();
  
    initialize_heuristics();
  
    size_t init_byte_size = collector_policy()->initial_heap_byte_size();
    size_t max_byte_size  = collector_policy()->max_heap_byte_size();
    size_t heap_alignment = collector_policy()->heap_alignment();
  
    if (ShenandoahAlwaysPreTouch) {
      // Enabled pre-touch means the entire heap is committed right away.
      init_byte_size = max_byte_size;
    }
  
!   Universe::check_alignment(max_byte_size,
!                             ShenandoahHeapRegion::region_size_bytes(),
!                             "shenandoah heap");
!   Universe::check_alignment(init_byte_size,
!                             ShenandoahHeapRegion::region_size_bytes(),
!                             "shenandoah heap");
! 
!   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
!                                                  heap_alignment);
! 
!   _reserved.set_word_size(0);
!   _reserved.set_start((HeapWord*)heap_rs.base());
!   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
! 
!   set_barrier_set(new ShenandoahBarrierSet(this));
!   ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
  
    _num_regions = ShenandoahHeapRegion::region_count();
  
!   size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
    num_committed_regions = MIN2(num_committed_regions, _num_regions);
    assert(num_committed_regions <= _num_regions, "sanity");
  
!   _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
    _committed = _initial_size;
  
!   log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT "%s",
!                      byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size));
!   if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
!     vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
!   }
! 
!   size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
!   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
! 
!   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
!   _free_set = new ShenandoahFreeSet(this, _num_regions);
!   _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());
! 
!   if (ShenandoahPacing) {
!     _pacer = new ShenandoahPacer(this);
!     _pacer->setup_for_idle();
!   } else {
!     _pacer = NULL;
    }
  
!   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
!          err_msg("misaligned heap: " PTR_FORMAT, p2i(base())));
! 
!   // The call below uses stuff (the SATB* things) that are in G1, but probably
!   // belong into a shared location.
!   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
!                                                SATB_Q_FL_lock,
!                                                20 /*G1SATBProcessCompletedThreshold */,
!                                                Shared_SATB_Q_lock);
  
-   // Reserve space for prev and next bitmap.
-   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  
    _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
    _bitmap_size = align_size_up(_bitmap_size, bitmap_page_size);
-   _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
  
    size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
    guarantee(bitmap_bytes_per_region != 0,
              err_msg("Bitmap bytes per region should not be zero"));
--- 129,194 ----
    ShenandoahBrooksPointer::initial_checks();
  
    initialize_heuristics();
  
+   //
+   // Figure out heap sizing
+   //
+ 
    size_t init_byte_size = collector_policy()->initial_heap_byte_size();
    size_t max_byte_size  = collector_policy()->max_heap_byte_size();
    size_t heap_alignment = collector_policy()->heap_alignment();
  
+   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+ 
    if (ShenandoahAlwaysPreTouch) {
      // Enabled pre-touch means the entire heap is committed right away.
      init_byte_size = max_byte_size;
    }
  
!   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
!   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
  
    _num_regions = ShenandoahHeapRegion::region_count();
  
!   size_t num_committed_regions = init_byte_size / reg_size_bytes;
    num_committed_regions = MIN2(num_committed_regions, _num_regions);
    assert(num_committed_regions <= _num_regions, "sanity");
  
!   _initial_size = num_committed_regions * reg_size_bytes;
    _committed = _initial_size;
  
!   size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
!   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
! 
!   //
!   // Reserve and commit memory for heap
!   //
! 
!   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
!   _reserved.set_word_size(0);
!   _reserved.set_start((HeapWord*)heap_rs.base());
!   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
!   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
!   _heap_region_special = heap_rs.special();
! 
!   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
!          err_msg("Misaligned heap: " PTR_FORMAT, p2i(base())));
! 
!   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
!   if (!_heap_region_special) {
!     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
!                               "Cannot commit heap memory");
    }
  
!   //
!   // Reserve and commit memory for bitmap(s)
!   //
  
    _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
    _bitmap_size = align_size_up(_bitmap_size, bitmap_page_size);
  
    size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
    guarantee(bitmap_bytes_per_region != 0,
              err_msg("Bitmap bytes per region should not be zero"));
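The reshuffled sequence makes the reserve/commit split explicit: the whole maximum heap is reserved as address space, only the initial prefix is committed, and the commit is skipped entirely when ReservedSpace::special() reports that the memory was pre-committed at reservation time (e.g. HugeTLBFS large pages, per 8220162). A Linux-only sketch of the same split in plain POSIX terms (sizes are made up; this approximates ReservedSpace plus os::commit_memory_or_exit, it is not the HotSpot code):

    #include <cstddef>
    #include <cstdio>
    #include <sys/mman.h>

    int main() {
      const size_t max_bytes  = 1024u * 1024 * 1024;  // "max heap"
      const size_t init_bytes =  128u * 1024 * 1024;  // "initial heap"

      // Reserve: claim address space only, no backing memory yet.
      void* base = mmap(NULL, max_bytes, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (base == MAP_FAILED) { perror("reserve"); return 1; }

      // Commit: make the initial prefix actually usable.
      if (mprotect(base, init_bytes, PROT_READ | PROT_WRITE) != 0) {
        perror("commit"); return 1;
      }
      printf("reserved %zu bytes at %p, committed %zu\n", max_bytes, base, init_bytes);
      return 0;
    }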
*** 208,249 ****
    guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
              err_msg("Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
                      _bitmap_bytes_per_slice, bitmap_page_size));
  
!   ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
!   MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
!   _bitmap_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);
  
    size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                                align_size_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
    bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
!   os::commit_memory_or_exit((char *) (_bitmap_region.start()), bitmap_init_commit, false,
!                             "couldn't allocate initial bitmap");
! 
!   size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  
    if (ShenandoahVerify) {
!     ReservedSpace verify_bitmap(_bitmap_size, page_size);
!     os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
!                               "couldn't allocate verification bitmap");
      MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
      MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
      _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
      _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
    }
  
!   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
  
    {
      ShenandoahHeapLocker locker(lock());
      for (size_t i = 0; i < _num_regions; i++) {
!       ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
!                                                          (HeapWord*) pgc_rs.base() + reg_size_words * i,
!                                                          reg_size_words,
!                                                          i,
!                                                          i < num_committed_regions);
  
        _marking_context->initialize_top_at_mark_start(r);
        _regions[i] = r;
        assert(!collection_set()->is_in(i), "New region should not be in collection set");
      }
--- 209,269 ----
    guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
              err_msg("Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
                      _bitmap_bytes_per_slice, bitmap_page_size));
  
!   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
!   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
!   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
!   _bitmap_region_special = bitmap.special();
  
    size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                                align_size_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
    bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
!   if (!_bitmap_region_special) {
!     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
!                               "Cannot commit bitmap memory");
!   }
! 
!   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
  
    if (ShenandoahVerify) {
!     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
!     if (!verify_bitmap.special()) {
!       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
!                                 "Cannot commit verification bitmap memory");
!     }
      MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
      MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
      _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
      _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
    }
  
!   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
!   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
!   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
!   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
!   _aux_bitmap_region_special = aux_bitmap.special();
!   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
! 
!   //
!   // Create regions and region sets
!   //
! 
!   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
!   _free_set = new ShenandoahFreeSet(this, _num_regions);
!   _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)sh_rs.base());
  
    {
      ShenandoahHeapLocker locker(lock());
+ 
+     size_t size_words = ShenandoahHeapRegion::region_size_words();
+ 
      for (size_t i = 0; i < _num_regions; i++) {
!       HeapWord* start = (HeapWord*)sh_rs.base() + size_words * i;
!       bool is_committed = i < num_committed_regions;
!       ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this, start, size_words, i, is_committed);
  
        _marking_context->initialize_top_at_mark_start(r);
        _regions[i] = r;
        assert(!collection_set()->is_in(i), "New region should not be in collection set");
      }
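For scale, the bitmap math above is easy to check by hand. Assuming 32 MB regions and one mark bit per 8-byte heap word (so MarkBitMap::heap_map_factor() == 64 on a 64-bit VM; both numbers are assumptions for illustration), each region needs 512 KB of bitmap, and a slice covering N regions commits N * 512 KB at a time:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t region_size     = 32u * 1024 * 1024; // assumed region size
      const size_t heap_map_factor = 64;                // assumed: 1 bit per 64-bit word
      printf("bitmap bytes per region: %zu\n", region_size / heap_map_factor); // 524288
      return 0;
    }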
*** 253,300 ****
  
      _free_set->rebuild();
    }
  
    if (ShenandoahAlwaysPreTouch) {
!     assert (!AlwaysPreTouch, "Should have been overridden");
  
      // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
      // before initialize() below zeroes it with initializing thread. For any given region,
      // we touch the region and the corresponding bitmaps from the same thread.
      ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
  
!     log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
!                        _num_regions, page_size);
!     ShenandoahPretouchTask cl(bitmap0.base(), _bitmap_size, page_size);
!     _workers->run_task(&cl);
    }
  
!   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
!   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
!   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
!   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
!   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
! 
    _monitoring_support = new ShenandoahMonitoringSupport(this);
  
    _phase_timings = new ShenandoahPhaseTimings();
  
    if (ShenandoahAllocationTrace) {
      _alloc_tracker = new ShenandoahAllocTracker();
    }
  
!   ShenandoahStringDedup::initialize();
  
    _control_thread = new ShenandoahControlThread();
  
!   ShenandoahCodeRoots::initialize();
! 
!   _liveness_cache = NEW_C_HEAP_ARRAY(jushort*, _max_workers, mtGC);
!   for (uint worker = 0; worker < _max_workers; worker++) {
!     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(jushort, _num_regions, mtGC);
!     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(jushort));
!   }
  
    return JNI_OK;
  }
  
  #ifdef _MSC_VER
--- 273,355 ----
  
      _free_set->rebuild();
    }
  
    if (ShenandoahAlwaysPreTouch) {
!     assert(!AlwaysPreTouch, "Should have been overridden");
  
      // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
      // before initialize() below zeroes it with initializing thread. For any given region,
      // we touch the region and the corresponding bitmaps from the same thread.
      ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
  
!     size_t pretouch_heap_page_size = heap_page_size;
!     size_t pretouch_bitmap_page_size = bitmap_page_size;
! 
! #ifdef LINUX
!     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
!     // pages. But, the kernel needs to know that every small page is used, in order to coalesce
!     // them into huge one. Therefore, we need to pretouch with smaller pages.
!     if (UseTransparentHugePages) {
!       pretouch_heap_page_size = (size_t)os::vm_page_size();
!       pretouch_bitmap_page_size = (size_t)os::vm_page_size();
      }
+ #endif
  
!     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
!     // simpler by pre-touching continuous spaces (heap and bitmap) separately.
!     log_info(gc, init)("Pretouch bitmap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page",
!                        _num_regions, pretouch_bitmap_page_size);
!     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, pretouch_bitmap_page_size);
!     _workers->run_task(&bcl);
! 
!     log_info(gc, init)("Pretouch heap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page",
!                        _num_regions, pretouch_heap_page_size);
!     ShenandoahPretouchHeapTask hcl(pretouch_heap_page_size);
!     _workers->run_task(&hcl);
!   }
! 
!   //
!   // Initialize the rest of GC subsystems
!   //
! 
!   set_barrier_set(new ShenandoahBarrierSet(this));
! 
!   _liveness_cache = NEW_C_HEAP_ARRAY(jushort*, _max_workers, mtGC);
!   for (uint worker = 0; worker < _max_workers; worker++) {
!     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(jushort, _num_regions, mtGC);
!     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(jushort));
!   }
  
+   // The call below uses stuff (the SATB* things) that are in G1, but probably
+   // belong into a shared location.
+   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
+                                                SATB_Q_FL_lock,
+                                                20 /*G1SATBProcessCompletedThreshold */,
+                                                Shared_SATB_Q_lock);
+ 
+   _monitoring_support = new ShenandoahMonitoringSupport(this);
  
    _phase_timings = new ShenandoahPhaseTimings();
  
+   ShenandoahStringDedup::initialize();
+   ShenandoahCodeRoots::initialize();
  
    if (ShenandoahAllocationTrace) {
      _alloc_tracker = new ShenandoahAllocTracker();
    }
  
!   if (ShenandoahPacing) {
!     _pacer = new ShenandoahPacer(this);
!     _pacer->setup_for_idle();
!   } else {
!     _pacer = NULL;
!   }
  
    _control_thread = new ShenandoahControlThread();
  
!   log_info(gc, init)("Initialize Shenandoah heap with initial size " SIZE_FORMAT "%s",
!                      byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size));
  
    return JNI_OK;
  }
  
  #ifdef _MSC_VER
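The LINUX block above is the heart of 8220153: memory hinted with madvise(MADV_HUGEPAGE) only coalesces into huge pages after its small pages are actually touched, so the pretouch loop must step by the small page size rather than the large one. A standalone Linux-only sketch of that interaction (the buffer size is arbitrary):

    #include <cstddef>
    #include <sys/mman.h>
    #include <unistd.h>

    int main() {
      const size_t len = 64u * 1024 * 1024;
      char* p = (char*) mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) return 1;
      madvise(p, len, MADV_HUGEPAGE);              // hint: THP may coalesce this range
      const size_t small = (size_t) sysconf(_SC_PAGESIZE);
      for (size_t off = 0; off < len; off += small) {
        p[off] = 0;                                // touch every small page, not every 2M
      }
      return 0;
    }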
*** 336,353 ****
--- 391,415 ----
  }
  
  ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
    SharedHeap(policy),
    _shenandoah_policy(policy),
+   _heap_region_special(false),
    _regions(NULL),
    _free_set(NULL),
    _collection_set(NULL),
    _update_refs_iterator(this),
    _bytes_allocated_since_gc_start(0),
    _max_workers((uint)MAX2(ConcGCThreads, ParallelGCThreads)),
    _ref_processor(NULL),
    _marking_context(NULL),
+   _bitmap_size(0),
+   _bitmap_regions_per_slice(0),
+   _bitmap_bytes_per_slice(0),
+   _bitmap_region_special(false),
+   _aux_bitmap_region_special(false),
+   _liveness_cache(NULL),
    _aux_bit_map(),
    _verifier(NULL),
    _pacer(NULL),
    _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
    _phase_timings(NULL),
*** 1143,1153 ****
   * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
   * is allowed to report dead objects, but is not required to do so.
   */
  void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
    assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
!   if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
      log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
      return;
    }
  
    // Reset bitmap
--- 1205,1215 ----
   * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
   * is allowed to report dead objects, but is not required to do so.
   */
  void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
    assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
!   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
      log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
      return;
    }
  
    // Reset bitmap
*** 1170,1180 ****
      obj->oop_iterate(&oops);
    }
  
    assert(oop_stack.is_empty(), "should be empty");
  
!   if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
      log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
    }
  }
  
  void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
--- 1232,1242 ----
      obj->oop_iterate(&oops);
    }
  
    assert(oop_stack.is_empty(), "should be empty");
  
!   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
      log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
    }
  }
  
  void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
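Both call sites now skip the os calls for special mappings, because HugeTLBFS-style memory is committed for its whole lifetime and cannot be released page by page. A POSIX approximation of the commit/uncommit pair object_iterate() wraps around the aux bitmap (only the shape; HotSpot's os::commit_memory and os::uncommit_memory differ per platform):

    #include <cstddef>
    #include <sys/mman.h>

    static bool commit(void* addr, size_t len) {
      return mprotect(addr, len, PROT_READ | PROT_WRITE) == 0;
    }

    static bool uncommit(void* addr, size_t len) {
      // Drop the backing pages, then make the range inaccessible again.
      return madvise(addr, len, MADV_DONTNEED) == 0
          && mprotect(addr, len, PROT_NONE) == 0;
    }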
*** 2096,2105 ****
--- 2158,2172 ----
  }
  
  bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
    assert_heaplock_owned_by_current_thread();
  
+   // Bitmaps in special regions do not need commits
+   if (_bitmap_region_special) {
+     return true;
+   }
+ 
    if (is_bitmap_slice_committed(r, true)) {
      // Some other region from the group is already committed, meaning the bitmap
      // slice is already committed, we exit right away.
      return true;
    }
*** 2115,2124 ****
--- 2182,2196 ----
  }
  
  bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
    assert_heaplock_owned_by_current_thread();
  
+   // Bitmaps in special regions do not need uncommits
+   if (_bitmap_region_special) {
+     return true;
+   }
+ 
    if (is_bitmap_slice_committed(r, true)) {
      // Some other region from the group is still committed, meaning the bitmap
      // slice should stay committed, exit right away.
      return true;
    }
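The two guards encode the same rule for per-slice bitmap maintenance: a special() mapping was committed once at reservation and stays committed, so both directions degrade to successful no-ops. In isolation the pattern looks like this (names are illustrative, not the HotSpot code):

    // Hypothetical standalone shape of the added guard.
    bool adjust_slice_commit(bool bitmap_region_special) {
      if (bitmap_region_special) {
        return true;  // pre-committed large pages: nothing to do, report success
      }
      // ... fall through to the real os-level commit/uncommit ...
      return true;
    }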
*** 2537,2556 ****
--- 2609,2630 ----
      }
    }
  
  jushort* ShenandoahHeap::get_liveness_cache(uint worker_id) {
  #ifdef ASSERT
+   assert(_liveness_cache != NULL, "sanity");
    assert(worker_id < _max_workers, "sanity");
    for (uint i = 0; i < num_regions(); i++) {
      assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
    }
  #endif
    return _liveness_cache[worker_id];
  }
  
  void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
    assert(worker_id < _max_workers, "sanity");
+   assert(_liveness_cache != NULL, "sanity");
    jushort* ld = _liveness_cache[worker_id];
    for (uint i = 0; i < num_regions(); i++) {
      ShenandoahHeapRegion* r = get_region(i);
      jushort live = ld[i];
      if (live > 0) {