< prev index next >

src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.cpp

Print this page
rev 10494 : Fix build failure: signedness mismatch in assert
Contributed-by: Michal Vala <mvala@redhat.com>
rev 10496 : [backport] Rename "cancel_concgc" to "cancel_gc"
rev 10538 : [backport] VSC++ requires space(s) in between two string literals
rev 10540 : [backport] CollectedHeap::max_tlab_size is measured in words
rev 10565 : [backport] Elastic TLABs support for Shenandoah
rev 10567 : [backport] Heap region count selection should only consider max heap size
rev 10580 : [backport] Refactor to group marking bitmap and TAMS structure in one class ShenandoahMarkingContext
rev 10587 : [backport] Refactor gc+init logging
rev 10597 : [backport] Report heap region stats in proper units
rev 10598 : [backport] Shenandoah changes to allow enabling -Wreorder
rev 10608 : [backport] Avoid using uintx in ShenandoahHeapRegion
rev 10614 : [backport] Replace custom asserts with shenandoah_assert_*
rev 10623 : JDK8u: Silence compilation warnings on implicit type conversion

@@ -25,42 +25,45 @@
 #include "memory/allocation.hpp"
 #include "gc_implementation/shenandoah/brooksPointer.hpp"
 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
 #include "memory/space.inline.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepoint.hpp"
 
+size_t ShenandoahHeapRegion::RegionCount = 0;
 size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
 size_t ShenandoahHeapRegion::RegionSizeWords = 0;
 size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
 size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
 size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
 size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
 size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
 size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
 size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
+size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;
 
 ShenandoahHeapRegion::ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start,
                                            size_t size_words, size_t index, bool committed) :
   _heap(heap),
-  _pacer(ShenandoahPacing ? heap->pacer() : NULL),
   _region_number(index),
   _live_data(0),
+  _reserved(MemRegion(start, size_words)),
   _tlab_allocs(0),
   _gclab_allocs(0),
   _shared_allocs(0),
-  _reserved(MemRegion(start, size_words)),
   _new_top(NULL),
+  _critical_pins(0),
   _state(committed ? _empty_committed : _empty_uncommitted),
   _empty_time(os::elapsedTime()),
-  _critical_pins(0) {
+  _pacer(ShenandoahPacing ? heap->pacer() : NULL) {
 
   ContiguousSpace::initialize(_reserved, true, committed);
 }
 
 size_t ShenandoahHeapRegion::region_number() const {

@@ -187,11 +190,11 @@
       _state = _pinned_humongous_start;
     case _pinned_humongous_start:
       _critical_pins++;
       return;
     case _cset:
-      guarantee(_heap->cancelled_concgc(), "only valid when evac has been cancelled");
+      guarantee(_heap->cancelled_gc(), "only valid when evac has been cancelled");
       assert (_critical_pins == 0, "sanity");
       _state = _pinned_cset;
       _critical_pins++;
       return;
     default:

@@ -212,11 +215,11 @@
     case _regular:
     case _humongous_start:
       assert (_critical_pins == 0, "sanity");
       return;
     case _pinned_cset:
-      guarantee(_heap->cancelled_concgc(), "only valid when evac has been cancelled");
+      guarantee(_heap->cancelled_gc(), "only valid when evac has been cancelled");
       assert (_critical_pins > 0, "sanity");
       _critical_pins--;
       if (_critical_pins == 0) {
         _state = _cset;
       }

@@ -338,11 +341,11 @@
 }
 
 void ShenandoahHeapRegion::set_live_data(size_t s) {
   assert(Thread::current()->is_VM_thread(), "by VM thread");
   size_t v = s >> LogHeapWordSize;
-  assert(v < max_jint, "sanity");
+  assert(v < (size_t)max_jint, "sanity");
   _live_data = (jint)v;
 }
 
 size_t ShenandoahHeapRegion::get_live_data_words() const {
   jint v = OrderAccess::load_acquire((volatile jint*)&_live_data);

@@ -408,46 +411,22 @@
       ShouldNotReachHere();
   }
   st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
             p2i(bottom()), p2i(top()), p2i(end()));
   st->print("|TAMS " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
-            p2i(_heap->complete_top_at_mark_start(_bottom)),
-            p2i(_heap->next_top_at_mark_start(_bottom)));
-  st->print("|U %3d%%", (int) ((double) used() * 100 / capacity()));
-  st->print("|T %3d%%", (int) ((double) get_tlab_allocs() * 100 / capacity()));
-  st->print("|G %3d%%", (int) ((double) get_gclab_allocs() * 100 / capacity()));
-  st->print("|S %3d%%", (int) ((double) get_shared_allocs() * 100 / capacity()));
-  st->print("|L %3d%%", (int) ((double) get_live_data_bytes() * 100 / capacity()));
+            p2i(_heap->complete_marking_context()->top_at_mark_start(region_number())),
+            p2i(_heap->next_marking_context()->top_at_mark_start(region_number())));
+  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
+  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
+  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
+  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
+  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
   st->print("|CP " SIZE_FORMAT_W(3), _critical_pins);
 
   st->cr();
 }
 
-
-class ShenandoahSkipUnreachableObjectToOopClosure: public ObjectClosure {
-  ExtendedOopClosure* _cl;
-  bool _skip_unreachable_objects;
-  ShenandoahHeap* _heap;
-
-public:
-  ShenandoahSkipUnreachableObjectToOopClosure(ExtendedOopClosure* cl, bool skip_unreachable_objects) :
-    _cl(cl), _skip_unreachable_objects(skip_unreachable_objects), _heap(ShenandoahHeap::heap()) {}
-
-  void do_object(oop obj) {
-
-    if ((! _skip_unreachable_objects) || _heap->is_marked_complete(obj)) {
-#ifdef ASSERT
-      if (_skip_unreachable_objects) {
-        assert(_heap->is_marked_complete(obj), "obj must be live");
-      }
-#endif
-      obj->oop_iterate(_cl);
-    }
-
-  }
-};
-
 void ShenandoahHeapRegion::fill_region() {
   if (free() > (BrooksPointer::word_size() + CollectedHeap::min_fill_size())) {
     HeapWord* filler = allocate(BrooksPointer::word_size(), ShenandoahHeap::_alloc_shared);
     HeapWord* obj = allocate(end() - top(), ShenandoahHeap::_alloc_shared);
     _heap->fill_with_object(obj, end() - obj);

@@ -474,15 +453,18 @@
   if (ZapUnusedHeapArea) {
     ContiguousSpace::mangle_unused_area_complete();
   }
   clear_live_data();
   reset_alloc_metadata();
+
+  ShenandoahMarkingContext* const compl_ctx = _heap->complete_marking_context();
+
  // Reset C-TAMS pointer to ensure size-based iteration, everything
  // in that region is going to be new objects.
-  _heap->set_complete_top_at_mark_start(bottom(), bottom());
+  compl_ctx->set_top_at_mark_start(region_number(), bottom());
   // We can only safely reset the C-TAMS pointer if the bitmap is clear for that region.
-  assert(_heap->is_complete_bitmap_clear_range(bottom(), end()), "must be clear");
+  assert(compl_ctx->is_bitmap_clear_range(bottom(), end()), "must be clear");
 
   make_empty();
 }
 
 HeapWord* ShenandoahHeapRegion::block_start_const(const void* p) const {

@@ -496,22 +478,21 @@
     HeapWord* cur = last;
     while (cur <= p) {
       last = cur;
       cur += oop(cur)->size() + BrooksPointer::word_size();
     }
-    assert(oop(last)->is_oop(),
-           err_msg(PTR_FORMAT" should be an object start", p2i(last)));
+    shenandoah_assert_correct(NULL, oop(last));
     return last;
   }
 }
 
-void ShenandoahHeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
+void ShenandoahHeapRegion::setup_sizes(size_t initial_heap_size, size_t max_heap_size) {
   // Absolute minimums we should not ever break:
   static const size_t MIN_REGION_SIZE = 256*K;
   static const size_t MIN_NUM_REGIONS = 10;
 
-  uintx region_size;
+  size_t region_size;
   if (FLAG_IS_DEFAULT(ShenandoahHeapRegionSize)) {
     if (ShenandoahMinRegionSize > initial_heap_size / MIN_NUM_REGIONS) {
       err_msg message("Initial heap size (" SIZE_FORMAT "K) is too low to afford the minimum number "
                       "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "K).",
                       initial_heap_size/K, MIN_NUM_REGIONS, ShenandoahMinRegionSize/K);

@@ -535,13 +516,14 @@
     if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
       err_msg message("Minimum (" SIZE_FORMAT "K) should be larger than maximum (" SIZE_FORMAT "K).",
                       ShenandoahMinRegionSize/K, ShenandoahMaxRegionSize/K);
       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
     }
-    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
-    region_size = MAX2(average_heap_size / ShenandoahTargetNumRegions,
-                       ShenandoahMinRegionSize);
+
+    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
+    // for usual heap sizes. Do not depend on initial_heap_size here.
+    region_size = max_heap_size / ShenandoahTargetNumRegions;
 
     // Now make sure that we don't go over or under our limits.
     region_size = MAX2(ShenandoahMinRegionSize, region_size);
     region_size = MIN2(ShenandoahMaxRegionSize, region_size);
 

@@ -578,30 +560,33 @@
 
   int region_size_log = log2_long((jlong) region_size);
   // Recalculate the region size to make sure it's a power of
   // 2. This means that region_size is the largest power of 2 that's
   // <= what we've calculated so far.
-  region_size = ((uintx)1 << region_size_log);
+  region_size = (size_t(1) << region_size_log);
 
   // Now, set up the globals.
   guarantee(RegionSizeBytesShift == 0, "we should only set it once");
   RegionSizeBytesShift = (size_t)region_size_log;
 
   guarantee(RegionSizeWordsShift == 0, "we should only set it once");
   RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;
 
   guarantee(RegionSizeBytes == 0, "we should only set it once");
-  RegionSizeBytes = (size_t)region_size;
+  RegionSizeBytes = region_size;
   RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
   assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");
 
   guarantee(RegionSizeWordsMask == 0, "we should only set it once");
   RegionSizeWordsMask = RegionSizeWords - 1;
 
   guarantee(RegionSizeBytesMask == 0, "we should only set it once");
   RegionSizeBytesMask = RegionSizeBytes - 1;
 
+  guarantee(RegionCount == 0, "we should only set it once");
+  RegionCount = max_heap_size / RegionSizeBytes;
+
   guarantee(HumongousThresholdWords == 0, "we should only set it once");
   HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
   assert (HumongousThresholdWords <= RegionSizeWords, "sanity");
 
   guarantee(HumongousThresholdBytes == 0, "we should only set it once");

@@ -621,20 +606,27 @@
   // With current divisor, we will waste no more than 1/8 of region size in the worst
   // case. This also has a secondary effect on collection set selection: even under
   // the race, the regions would be at least 7/8 used, which allows relying on
   // "used" - "live" for cset selection. Otherwise, we can get the fragmented region
   // below the garbage threshold that would never be considered for collection.
+  //
+  // The whole thing would be mitigated if Elastic TLABs were enabled, but there
+  // is no support in this JDK.
+  //
   guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
   MaxTLABSizeBytes = MIN2(RegionSizeBytes / 8, HumongousThresholdBytes);
   assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");
 
-  log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", RegionSizeBytes / M);
-  log_info(gc, init)("Region size in bytes: "SIZE_FORMAT, RegionSizeBytes);
-  log_info(gc, init)("Region size byte shift: "SIZE_FORMAT, RegionSizeBytesShift);
-  log_info(gc, init)("Humongous threshold in bytes: "SIZE_FORMAT, HumongousThresholdBytes);
-  log_info(gc, init)("Max TLAB size in bytes: "SIZE_FORMAT, MaxTLABSizeBytes);
-  log_info(gc, init)("Number of regions: "SIZE_FORMAT, max_heap_size / RegionSizeBytes);
+  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
+  MaxTLABSizeWords = MaxTLABSizeBytes / HeapWordSize;
+
+  log_info(gc, init)("Regions: " SIZE_FORMAT " x " SIZE_FORMAT "%s",
+                     RegionCount, byte_size_in_proper_unit(RegionSizeBytes), proper_unit_for_byte_size(RegionSizeBytes));
+  log_info(gc, init)("Humongous object threshold: " SIZE_FORMAT "%s",
+                     byte_size_in_proper_unit(HumongousThresholdBytes), proper_unit_for_byte_size(HumongousThresholdBytes));
+  log_info(gc, init)("Max TLAB size: " SIZE_FORMAT "%s",
+                     byte_size_in_proper_unit(MaxTLABSizeBytes), proper_unit_for_byte_size(MaxTLABSizeBytes));
 }
 
 void ShenandoahHeapRegion::do_commit() {
   if (!os::commit_memory((char *) _reserved.start(), _reserved.byte_size(), false)) {
     report_java_out_of_memory("Unable to commit region");
< prev index next >