src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

rev 52719 : [mq]: 8159440-marking-of-promoted-objects-to-concurrent

@@ -254,46 +254,59 @@
   _chunk_list = NULL;
   _free_list = NULL;
 }
 
 G1CMRootRegions::G1CMRootRegions() :
-  _survivors(NULL), _cm(NULL), _scan_in_progress(false),
-  _should_abort(false), _claimed_survivor_index(0) { }
-
-void G1CMRootRegions::init(const G1SurvivorRegions* survivors, G1ConcurrentMark* cm) {
-  _survivors = survivors;
-  _cm = cm;
+  _root_regions(NULL),
+  _max_regions(0),
+  _cur_regions(0),
+  _scan_in_progress(false),
+  _should_abort(false),
+  _claimed_root_regions(0) { }
+
+void G1CMRootRegions::reset(const uint max_regions) {
+  _root_regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_regions, mtGC);
+  _max_regions = max_regions;
+  _cur_regions = 0;
+}
+
+void G1CMRootRegions::add(HeapRegion* hr) {
+  assert_at_safepoint();
+  size_t idx = Atomic::add((size_t)1, &_cur_regions) - 1;
+  assert(idx < _max_regions, "Trying to add more root regions than there is space for, max %u", _max_regions);
+  _root_regions[idx] = hr;
 }
 
 void G1CMRootRegions::prepare_for_scan() {
   assert(!scan_in_progress(), "pre-condition");
 
-  // Currently, only survivors can be root regions.
-  _claimed_survivor_index = 0;
-  _scan_in_progress = _survivors->regions()->is_nonempty();
+  _scan_in_progress = _cur_regions > 0;
+
+  _claimed_root_regions = 0;
   _should_abort = false;
 }
 
 HeapRegion* G1CMRootRegions::claim_next() {
   if (_should_abort) {
     // If someone has set the should_abort flag, we return NULL to
     // force the caller to bail out of their loop.
     return NULL;
   }
 
-  // Currently, only survivors can be root regions.
-  const GrowableArray<HeapRegion*>* survivor_regions = _survivors->regions();
+  if (_claimed_root_regions >= _cur_regions) {
+    return NULL;
+  }
 
-  int claimed_index = Atomic::add(1, &_claimed_survivor_index) - 1;
-  if (claimed_index < survivor_regions->length()) {
-    return survivor_regions->at(claimed_index);
+  size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
+  if (claimed_index < _cur_regions) {
+    return _root_regions[claimed_index];
   }
   return NULL;
 }
 
 uint G1CMRootRegions::num_root_regions() const {
-  return (uint)_survivors->regions()->length();
+  return (uint)_cur_regions;
 }
 
 void G1CMRootRegions::notify_scan_done() {
   MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
   _scan_in_progress = false;

@@ -305,18 +318,21 @@
 }
 
 void G1CMRootRegions::scan_finished() {
   assert(scan_in_progress(), "pre-condition");
 
-  // Currently, only survivors can be root regions.
   if (!_should_abort) {
-    assert(_claimed_survivor_index >= 0, "otherwise comparison is invalid: %d", _claimed_survivor_index);
-    assert((uint)_claimed_survivor_index >= _survivors->length(),
-           "we should have claimed all survivors, claimed index = %u, length = %u",
-           (uint)_claimed_survivor_index, _survivors->length());
+    assert(_claimed_root_regions >= num_root_regions(),
+           "we should have claimed all root regions, claimed " SIZE_FORMAT ", length = %u",
+           _claimed_root_regions, num_root_regions());
   }
 
+  FREE_C_HEAP_ARRAY(HeapRegion*, _root_regions);
+  _root_regions = NULL;
+  _max_regions = 0;
+  _cur_regions = 0;
+
   notify_scan_done();
 }
 
 bool G1CMRootRegions::wait_until_scan_finished() {
   if (!scan_in_progress()) {

@@ -404,12 +420,10 @@
     vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
   }
 
   assert(CGC_lock != NULL, "CGC_lock must be initialized");
 
-  _root_regions.init(_g1h->survivor(), this);
-
   if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
     // Calculate the number of concurrent worker threads by scaling
     // the number of parallel GC threads.
     uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
     FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);

@@ -726,10 +740,12 @@
   reset();
 
   // For each region note start of marking.
   NoteStartOfMarkHRClosure startcl;
   _g1h->heap_region_iterate(&startcl);
+
+  _root_regions.reset(_g1h->max_regions());
 }
 
 
 void G1ConcurrentMark::post_initial_mark() {
   // Start Concurrent Marking weak-reference discovery.

@@ -857,16 +873,16 @@
          _max_concurrent_workers, result);
   return result;
 }
 
 void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
-  // Currently, only survivors can be root regions.
-  assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
+  assert(hr->is_old() || (hr->is_survivor() && hr->next_top_at_mark_start() == hr->bottom()),
+         "Root regions must be old or survivor but region %u is %s", hr->hrm_index(), hr->get_type_str());
   G1RootRegionScanClosure cl(_g1h, this, worker_id);
 
   const uintx interval = PrefetchScanIntervalInBytes;
-  HeapWord* curr = hr->bottom();
+  HeapWord* curr = hr->next_top_at_mark_start();
   const HeapWord* end = hr->top();
   while (curr < end) {
     Prefetch::read(curr, interval);
     oop obj = oop(curr);
     int size = obj->oop_iterate_size(&cl);
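
For readers less familiar with the lock-free hand-out pattern that G1CMRootRegions::add() and claim_next() use above, the following standalone sketch shows the same idea with std::atomic standing in for HotSpot's Atomic:: wrappers. The class and member names here are illustrative only and are not part of the patch.

    // Sketch of the root-region claiming scheme: regions are appended with an
    // atomic bump of one counter during the pause, and handed out to concurrent
    // workers with an atomic bump of a second counter.
    #include <atomic>
    #include <cassert>
    #include <cstddef>

    struct Region;  // stand-in for HeapRegion

    class RootRegionClaimer {
      Region**            _regions;      // backing array, sized up front
      size_t              _max;
      std::atomic<size_t> _num_added;    // bumped by add()
      std::atomic<size_t> _num_claimed;  // bumped by claim_next()

    public:
      RootRegionClaimer(Region** storage, size_t max)
        : _regions(storage), _max(max), _num_added(0), _num_claimed(0) {}

      // Called during the pause; several GC workers may add concurrently.
      void add(Region* r) {
        size_t idx = _num_added.fetch_add(1);   // returns the previous value
        assert(idx < _max && "more root regions than reserved space");
        _regions[idx] = r;
      }

      // Called by concurrent marking workers; returns nullptr once every
      // added region has been handed out.
      Region* claim_next() {
        if (_num_claimed.load() >= _num_added.load()) {
          return nullptr;  // cheap early exit before touching the counter
        }
        size_t idx = _num_claimed.fetch_add(1);
        if (idx < _num_added.load()) {
          return _regions[idx];
        }
        return nullptr;
      }
    };

The early return before the fetch_add mirrors the patch's check of _claimed_root_regions against _cur_regions: once all regions are claimed, late-arriving workers bail out without bumping the claim counter further.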