src/share/vm/gc/g1/g1ConcurrentMark.cpp

Print this page

        

@@ -134,11 +134,10 @@
 
 G1CMMarkStack::G1CMMarkStack() :
   _max_chunk_capacity(0),
   _base(NULL),
   _chunk_capacity(0),
-  _out_of_memory(false),
   _should_expand(false) {
   set_empty();
 }
 
 bool G1CMMarkStack::resize(size_t new_capacity) {

@@ -276,16 +275,15 @@
   OopChunk* new_chunk = remove_chunk_from_free_list();
 
   if (new_chunk == NULL) {
     // Did not get a chunk from the free list. Allocate from backing memory.
     new_chunk = allocate_new_chunk();
-  }
 
   if (new_chunk == NULL) {
-    _out_of_memory = true;
     return false;
   }
+  }
 
   Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, OopsPerChunk * sizeof(oop));
 
   add_chunk_to_chunk_list(new_chunk);
 

@@ -306,11 +304,10 @@
 }
 
 void G1CMMarkStack::set_empty() {
   _chunks_in_chunk_list = 0;
   _hwm = 0;
-  clear_out_of_memory();
   _chunk_list = NULL;
   _free_list = NULL;
 }
 
 G1CMRootRegions::G1CMRootRegions() :

@@ -590,18 +587,14 @@
   // pause with initial mark piggy-backed
   set_concurrent_marking_in_progress();
 }
 
 
-void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
+void G1ConcurrentMark::reset_marking_state() {
   _global_mark_stack.set_should_expand(has_overflown());
-  _global_mark_stack.set_empty();        // Also clears the overflow stack's overflow flag
-  if (clear_overflow) {
+  _global_mark_stack.set_empty();
  clear_has_overflown();
-  } else {
-    assert(has_overflown(), "pre-condition");
-  }
   _finger = _heap_start;
 
   for (uint i = 0; i < _max_worker_id; ++i) {
     G1CMTaskQueue* queue = _task_queues->queue(i);
     queue->set_empty();

@@ -881,11 +874,11 @@
       // task 0 is responsible for clearing the global data structures.
       // We should be here because of an overflow. Resetting the marking
       // state (which also clears the overflow flag) allows marking to be
       // restarted cleanly.
-      reset_marking_state(true /* clear_overflow */);
+      reset_marking_state();
 
       log_info(gc, marking)("Concurrent Mark reset for overflow");
     }
   }
 

@@ -1747,19 +1740,13 @@
 
     // The do_oop work routines of the keep_alive and drain_marking_stack
     // oop closures will set the has_overflown flag if we overflow the
     // global marking stack.
 
-    assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(),
+    assert(has_overflown() || _global_mark_stack.is_empty(),
            "Mark stack should be empty (unless it has overflown)");
 
-    if (_global_mark_stack.is_out_of_memory()) {
-      // This should have been done already when we tried to push an
-      // entry on to the global mark stack. But let's do it again.
-      set_has_overflown();
-    }
-
     assert(rp->num_q() == active_workers, "why not");
 
     rp->enqueue_discovered_references(executor);
 
     rp->verify_no_references_recorded();

@@ -2929,11 +2916,10 @@
       // which one.
       guarantee(_cm->out_of_regions(), "only way to reach here");
       guarantee(_cm->mark_stack_empty(), "only way to reach here");
       guarantee(_task_queue->size() == 0, "only way to reach here");
       guarantee(!_cm->has_overflown(), "only way to reach here");
-      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
     } else {
       // Apparently there's more work to do. Let's abort this task. It
       // will restart it and we can hopefully find more things to do.
       set_has_aborted();
     }