
src/share/vm/gc/g1/g1ConcurrentMark.cpp

rev 12311 : 8057003: Large reference arrays cause extremely long synchronization times
Summary: Slice large object arrays into parts so that the synchronization of marking threads with an STW pause request does not take long.
Reviewed-by:

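The patch lets the concurrent marking task queues hold "array slice" entries alongside ordinary object references, which is why the verification and stealing asserts below now accept either form. As a rough, self-contained illustration of the technique named in the summary, and not the actual G1CMObjArrayProcessor implementation, the sketch below tags the low pointer bit to distinguish a slice continuation from a normal reference and scans arrays in bounded chunks; the names (kSliceTag, scan_array_from, kChunk) and the nullptr-terminated array are assumptions made for this example only.

#include <cstdint>
#include <cstddef>
#include <deque>
#include <iostream>

// A trivial "object": just a mark bit. alignas keeps the low pointer bit
// free for tagging, mimicking word-aligned heap objects.
struct alignas(void*) Obj { bool marked = false; };

// Work-queue entries are plain machine words. An entry is either
//   - an Obj*        (ordinary reference), or
//   - a tagged Obj**  pointing into an object array where scanning should
//                     resume (an "array slice" continuation).
// Word-aligned pointers have a zero low bit, so it is free to use as the tag.
using Entry = uintptr_t;
static constexpr Entry kSliceTag = 1;

static bool  is_array_slice(Entry e)        { return (e & kSliceTag) != 0; }
static Entry encode_slice(Obj** resume_at)  { return reinterpret_cast<Entry>(resume_at) | kSliceTag; }
static Obj** decode_slice(Entry e)          { return reinterpret_cast<Obj**>(e & ~kSliceTag); }
static Entry encode_obj(Obj* obj)           { return reinterpret_cast<Entry>(obj); }
static Obj*  decode_obj(Entry e)            { return reinterpret_cast<Obj*>(e); }

struct Marker {
  std::deque<Entry> queue;
  static constexpr size_t kChunk = 4;   // elements marked per step (tiny, for the demo)

  // Mark up to kChunk elements starting at 'from'. The arrays in this demo
  // are nullptr-terminated; a real collector would instead recover the
  // array's extent from its header. If elements remain, push a tagged
  // continuation so the marking loop can yield between chunks.
  void scan_array_from(Obj** from) {
    size_t done = 0;
    while (*from != nullptr && done < kChunk) {
      (*from)->marked = true;
      ++from;
      ++done;
    }
    if (*from != nullptr) {
      queue.push_back(encode_slice(from));
    }
  }

  void drain() {
    while (!queue.empty()) {
      Entry e = queue.front();
      queue.pop_front();
      if (is_array_slice(e)) {
        scan_array_from(decode_slice(e));   // resume a partially scanned array
      } else {
        decode_obj(e)->marked = true;       // ordinary object reference
      }
      // A real marking loop would check for a safepoint/pause request here;
      // because each step is bounded, it reacts quickly even for huge arrays.
    }
  }
};

int main() {
  Obj a, b, c, d, e, f, g;
  Obj* array[] = { &a, &b, &c, &d, &e, &f, nullptr };  // a small "object array"

  Marker m;
  m.queue.push_back(encode_obj(&g));  // an ordinary reference on the queue
  m.scan_array_from(array);           // scans the first chunk, queues the rest as a slice
  m.drain();

  std::cout << "all marked: " << std::boolalpha
            << (a.marked && b.marked && c.marked && d.marked && e.marked && f.marked && g.marked)
            << std::endl;
  return 0;
}

Because each step marks only a bounded number of elements before returning to the queue, the marking loop can honor a pause request promptly even when it encounters a very large reference array, which is the point of this change.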
@@ -2007,14 +2007,14 @@
     _phase(phase),
     _info(info)
   { }
 
   void operator()(oop obj) const {
-    guarantee(obj->is_oop(),
+    guarantee(G1CMObjArrayProcessor::is_array_slice(obj) || obj->is_oop(),
               "Non-oop " PTR_FORMAT ", phase: %s, info: %d",
               p2i(obj), _phase, _info);
-    guarantee(!_g1h->obj_in_cs(obj),
+    guarantee(G1CMObjArrayProcessor::is_array_slice(obj) || !_g1h->obj_in_cs(obj),
               "obj: " PTR_FORMAT " in CSet, phase: %s, info: %d",
               p2i(obj), _phase, _info);
   }
 };
 

@@ -2434,10 +2434,11 @@
   for (size_t i = 0; i < G1CMMarkStack::OopsPerChunk; ++i) {
     oop elem = buffer[i];
     if (elem == NULL) {
       break;
     }
+    assert(G1CMObjArrayProcessor::is_array_slice(elem) || elem->is_oop(), "Element " PTR_FORMAT " must be an array slice or oop", p2i(elem));
     bool success = _task_queue->push(elem);
     // We only call this when the local queue is empty or under a
     // given target limit. So, we do not expect this push to fail.
     assert(success, "invariant");
   }

@@ -2445,12 +2446,27 @@
   // This operation was quite expensive, so decrease the limits
   decrease_limits();
   return true;
 }
 
+// Handle one marking queue entry: either a regular heap object to scan or
+// an array slice reference handled by the G1CMObjArrayProcessor.
+void G1CMTask::process_object(oop obj) {
+  if (!G1CMObjArrayProcessor::is_array_slice(obj)) {
+    assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
+    assert(!_g1h->is_on_master_free_list(
+           _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
+
+    scan_object(obj);
+  } else {
+    _words_scanned += _objArray_processor.process_slice_reference(obj);
+    check_limits();
+  }
+}
+
 void G1CMTask::drain_local_queue(bool partially) {
-  if (has_aborted()) return;
+  if (has_aborted()) {
+    return;
+  }
 
   // Decide what the target size is, depending whether we're going to
   // drain it partially (so that other tasks can steal if they run out
   // of things to do) or totally (at the very end).
   size_t target_size;

@@ -2462,16 +2478,11 @@
 
   if (_task_queue->size() > target_size) {
     oop obj;
     bool ret = _task_queue->pop_local(obj);
     while (ret) {
-      assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant" );
-      assert(!_g1h->is_on_master_free_list(
-                  _g1h->heap_region_containing((HeapWord*) obj)), "invariant");
-
-      scan_object(obj);
-
+      process_object(obj);
       if (_task_queue->size() <= target_size || has_aborted()) {
         ret = false;
       } else {
         ret = _task_queue->pop_local(obj);
       }

@@ -2775,13 +2786,11 @@
       if (mr.is_empty()) {
         giveup_current_region();
         regular_clock_call();
       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
         if (_nextMarkBitMap->isMarked(mr.start())) {
-          // The object is marked - apply the closure
-          BitMap::idx_t offset = _nextMarkBitMap->heapWordToOffset(mr.start());
-          bitmap_closure.do_bit(offset);
+          scan_object((oop)mr.start());
         }
         // Even if this task aborted while scanning the humongous object
         // we can (and should) give up the current region.
         giveup_current_region();
         regular_clock_call();

@@ -2878,13 +2887,13 @@
     assert(_cm->out_of_regions() && _task_queue->size() == 0,
            "only way to reach here");
     while (!has_aborted()) {
       oop obj;
       if (_cm->try_stealing(_worker_id, &_hash_seed, obj)) {
-        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
-               "any stolen object should be marked");
-        scan_object(obj);
+        assert(G1CMObjArrayProcessor::is_array_slice(obj) || _nextMarkBitMap->isMarked((HeapWord*) obj),
+               "Any stolen object should be a slice or marked");
+        process_object(obj);
 
         // And since we're towards the end, let's totally drain the
         // local queue and global stack.
         drain_local_queue(false);
         drain_global_stack(false);

@@ -3001,10 +3010,11 @@
                    G1ConcurrentMark* cm,
                    G1CMTaskQueue* task_queue,
                    G1CMTaskQueueSet* task_queues)
   : _g1h(G1CollectedHeap::heap()),
     _worker_id(worker_id), _cm(cm),
+    _objArray_processor(this),
     _claimed(false),
     _nextMarkBitMap(NULL), _hash_seed(17),
     _task_queue(task_queue),
     _task_queues(task_queues),
     _cm_oop_closure(NULL) {