
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 7211 : 6979279: remove special-case code for ParallelGCThreads==0

@@ -1436,11 +1436,10 @@
         hot_card_cache->reset_card_counts();
         hot_card_cache->reset_hot_cache();
       }
 
       // Rebuild remembered sets of all regions.
-      if (G1CollectedHeap::use_parallel_gc_threads()) {
         uint n_workers =
           AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                                   workers()->active_workers(),
                                                   Threads::number_of_non_daemon_threads());
         assert(UseDynamicNumberOfGCThreads ||

@@ -1462,14 +1461,10 @@
         assert(workers()->active_workers() > 0,
                "Active workers not properly set");
         set_par_threads(workers()->active_workers());
         workers()->run_task(&rebuild_rs_task);
         set_par_threads(0);
-      } else {
-        RebuildRSOutOfRegionClosure rebuild_rs(this);
-        heap_region_iterate(&rebuild_rs);
-      }
 
       // Rebuild the strong code root lists for each region
       rebuild_strong_code_roots();
 
       if (true) { // FIXME
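
Note (editorial, not part of the webrev): these first two hunks show the pattern applied throughout the rest of the file: the serial fallback, here heap_region_iterate() with a RebuildRSOutOfRegionClosure, is deleted and the set_par_threads() / run_task() / set_par_threads(0) gang path becomes unconditional. The sketch below is a minimal standalone illustration, with toy types rather than HotSpot's AbstractGangTask/FlexibleWorkGang, of why that is safe under the assumption that a gang with a single active worker simply runs the same work(0) call the deleted branch invoked directly.

    // Standalone sketch (not HotSpot code): a toy gang task. Running the task
    // on a gang with one active worker performs the same work(0) call that the
    // removed serial branches used to make directly, so dropping those branches
    // removes a duplicated call site rather than changing behavior.
    #include <cstdio>
    #include <thread>
    #include <vector>

    struct ToyGangTask {                        // stands in for AbstractGangTask
      virtual void work(unsigned worker_id) = 0;
      virtual ~ToyGangTask() {}
    };

    struct ToyWorkGang {                        // stands in for the work gang
      unsigned _active_workers;
      explicit ToyWorkGang(unsigned n) : _active_workers(n) {}
      void run_task(ToyGangTask* task) {
        std::vector<std::thread> threads;
        for (unsigned i = 0; i < _active_workers; ++i) {
          threads.emplace_back([task, i] { task->work(i); });
        }
        for (size_t t = 0; t < threads.size(); ++t) {
          threads[t].join();
        }
      }
    };

    struct ToyRebuildRSTask : ToyGangTask {
      virtual void work(unsigned worker_id) {
        std::printf("worker %u rebuilds its share of the remembered sets\n", worker_id);
      }
    };

    int main() {
      ToyRebuildRSTask task;
      ToyWorkGang serial_like(1);   // one worker behaves like the old serial path
      serial_like.run_task(&task);  // equivalent to the removed task.work(0)
      ToyWorkGang parallel(4);      // the usual parallel case
      parallel.run_task(&task);
      return 0;
    }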

@@ -2676,11 +2671,10 @@
   //          n collection set regions
   //          p threads
   // Then thread t will start at region floor ((t * n) / p)
 
   result = g1_policy()->collection_set();
-  if (G1CollectedHeap::use_parallel_gc_threads()) {
     uint cs_size = g1_policy()->cset_region_length();
     uint active_workers = workers()->active_workers();
     assert(UseDynamicNumberOfGCThreads ||
              active_workers == workers()->total_workers(),
              "Unless dynamic should use total workers");

@@ -2697,11 +2691,10 @@
     }
 
     for (uint i = start_ind; i < end_ind; i++) {
       result = result->next_in_collection_set();
     }
-  }
 
   // Note: the calculated starting heap region may be NULL
   // (when the collection set is empty).
   assert(result == NULL || result->in_collection_set(), "sanity");
   assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
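
Note (editorial, not part of the webrev): with the parallel check gone, the start-region comment above, floor((t * n) / p), now describes every collection pause. Below is a standalone sketch assuming hypothetical values n = 10 regions and p = 4 workers; end_ind is shown only as the next worker's start index to make the partition visible, while the real cs_size / active_workers / start_ind / end_ind computation is the one in the hunk and its elided context.

    // Standalone sketch (not HotSpot code): the partition described by the
    // comment "thread t will start at region floor ((t * n) / p)".
    #include <cstdio>

    int main() {
      const unsigned n = 10;  // collection set regions (hypothetical value)
      const unsigned p = 4;   // active workers (hypothetical value)
      for (unsigned t = 0; t < p; ++t) {
        unsigned start_ind = (t * n) / p;        // floor via integer division
        unsigned end_ind   = ((t + 1) * n) / p;  // next worker's start, shown for clarity
        std::printf("worker %u starts at region %u and covers [%u, %u)\n",
                    t, start_ind, start_ind, end_ind);
      }
      return 0;
    }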

@@ -3371,26 +3364,22 @@
     _cm->print_on_error(st);
   }
 }
 
 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
-  if (G1CollectedHeap::use_parallel_gc_threads()) {
     workers()->print_worker_threads_on(st);
-  }
   _cmThread->print_on(st);
   st->cr();
   _cm->print_worker_threads_on(st);
   _cg1r->print_worker_threads_on(st);
   if (G1StringDedup::is_enabled()) {
     G1StringDedup::print_worker_threads_on(st);
   }
 }
 
 void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
-  if (G1CollectedHeap::use_parallel_gc_threads()) {
     workers()->threads_do(tc);
-  }
   tc->do_thread(_cmThread);
   _cg1r->threads_do(tc);
   if (G1StringDedup::is_enabled()) {
     G1StringDedup::threads_do(tc);
   }

@@ -3681,22 +3670,22 @@
 
 void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const {
   print_taskqueue_stats_hdr(st);
 
   TaskQueueStats totals;
-  const int n = workers() != NULL ? workers()->total_workers() : 1;
+  const int n = workers()->total_workers();
   for (int i = 0; i < n; ++i) {
     st->print("%3d ", i); task_queue(i)->stats.print(st); st->cr();
     totals += task_queue(i)->stats;
   }
   st->print_raw("tot "); totals.print(st); st->cr();
 
   DEBUG_ONLY(totals.verify());
 }
 
 void G1CollectedHeap::reset_taskqueue_stats() {
-  const int n = workers() != NULL ? workers()->total_workers() : 1;
+  const int n = workers()->total_workers();
   for (int i = 0; i < n; ++i) {
     task_queue(i)->stats.reset();
   }
 }
 #endif // TASKQUEUE_STATS
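
Note (editorial, not part of the webrev): after this hunk the stats code assumes workers() is never NULL and always iterates over total_workers() queues. For readers unfamiliar with the TASKQUEUE_STATS output, the sketch below reproduces the shape of the table with toy types (not HotSpot's TaskQueueStats): one row per worker queue, folded into a "tot" row with operator+=.

    // Standalone sketch (not HotSpot code): per-worker stats rows folded into a
    // "tot" row, mirroring the loop in print_taskqueue_stats() above.
    #include <cstdio>

    struct ToyQueueStats {
      long pushes;
      long pops;
      ToyQueueStats(long pu = 0, long po = 0) : pushes(pu), pops(po) {}
      ToyQueueStats& operator+=(const ToyQueueStats& other) {
        pushes += other.pushes;
        pops   += other.pops;
        return *this;
      }
      void print() const { std::printf("%8ld %8ld\n", pushes, pops); }
    };

    int main() {
      const int n = 3;                           // number of worker queues (hypothetical)
      ToyQueueStats stats[n] = { ToyQueueStats(10, 9),
                                 ToyQueueStats(7, 7),
                                 ToyQueueStats(4, 3) };
      ToyQueueStats totals;
      std::printf("      pushes     pops\n");
      for (int i = 0; i < n; ++i) {
        std::printf("%3d ", i);
        stats[i].print();
        totals += stats[i];
      }
      std::printf("tot ");
      totals.print();
      return 0;
    }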

@@ -3790,12 +3779,11 @@
 
     _gc_tracer_stw->report_yc_type(yc_type());
 
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
 
-    int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
-                                workers()->active_workers() : 1);
+    int active_workers = workers()->active_workers();
     double pause_start_sec = os::elapsedTime();
     g1_policy()->phase_times()->note_gc_start(active_workers);
     log_gc_header();
 
     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());

@@ -4785,16 +4773,14 @@
 
   bool  _process_symbols;
   int _symbols_processed;
   int _symbols_removed;
 
-  bool _do_in_parallel;
 public:
   G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
     AbstractGangTask("String/Symbol Unlinking"),
     _is_alive(is_alive),
-    _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
     _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
 
     _initial_string_table_size = StringTable::the_table()->table_size();
     _initial_symbol_table_size = SymbolTable::the_table()->table_size();

@@ -4805,14 +4791,14 @@
       SymbolTable::clear_parallel_claimed_index();
     }
   }
 
   ~G1StringSymbolTableUnlinkTask() {
-    guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
+    guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
               err_msg("claim value %d after unlink less than initial string table size %d",
                       StringTable::parallel_claimed_index(), _initial_string_table_size));
-    guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
+    guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
               err_msg("claim value %d after unlink less than initial symbol table size %d",
                       SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
 
     if (G1TraceStringSymbolTableScrubbing) {
       gclog_or_tty->print_cr("Cleaned string and symbol table, "

@@ -4822,11 +4808,10 @@
                              symbols_processed(), symbols_removed());
     }
   }
 
   void work(uint worker_id) {
-    if (_do_in_parallel) {
       int strings_processed = 0;
       int strings_removed = 0;
       int symbols_processed = 0;
       int symbols_removed = 0;
       if (_process_strings) {

@@ -4837,18 +4822,10 @@
       if (_process_symbols) {
         SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
         Atomic::add(symbols_processed, &_symbols_processed);
         Atomic::add(symbols_removed, &_symbols_removed);
       }
-    } else {
-      if (_process_strings) {
-        StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
-      }
-      if (_process_symbols) {
-        SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
-      }
-    }
   }
 
   size_t strings_processed() const { return (size_t)_strings_processed; }
   size_t strings_removed()   const { return (size_t)_strings_removed; }
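
Note (editorial, not part of the webrev): with _do_in_parallel gone, work() always goes through StringTable/SymbolTable possibly_parallel_unlink, and the destructor guarantees above reduce to "the shared claim index moved past the initial table size". The sketch below is a standalone illustration of that claim-index idiom, assuming a hypothetical table of 1000 buckets claimed in chunks of 32; none of these names are the HotSpot API.

    // Standalone sketch (not HotSpot code): the shared claim-index idiom behind
    // the destructor guarantees above. Workers atomically claim chunks of
    // buckets; a worker only stops once a claim lands past the table size, so
    // after the gang finishes the claim index is >= the initial table size.
    #include <algorithm>
    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    static std::atomic<int> claimed_index(0);
    static const int table_size = 1000;   // hypothetical bucket count
    static const int chunk_size = 32;     // hypothetical claim granularity

    static void possibly_parallel_work(int* processed) {
      for (;;) {
        int start = claimed_index.fetch_add(chunk_size);  // claim the next chunk
        if (start >= table_size) {
          return;                                         // nothing left to claim
        }
        int end = std::min(start + chunk_size, table_size);
        *processed += end - start;                        // "unlink" buckets [start, end)
      }
    }

    int main() {
      const int n_workers = 4;
      std::vector<int> processed(n_workers, 0);
      std::vector<std::thread> gang;
      for (int i = 0; i < n_workers; ++i) {
        gang.emplace_back(possibly_parallel_work, &processed[i]);
      }
      for (size_t i = 0; i < gang.size(); ++i) {
        gang[i].join();
      }
      int total = 0;
      for (int i = 0; i < n_workers; ++i) {
        total += processed[i];                            // like the Atomic::add totals
      }
      // Mirrors: guarantee(parallel_claimed_index() >= initial table size, ...)
      std::printf("claim index %d >= table size %d, buckets processed %d\n",
                  claimed_index.load(), table_size, total);
      return 0;
    }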
 

@@ -5125,37 +5102,27 @@
 
 void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
                                         bool process_strings,
                                         bool process_symbols,
                                         bool class_unloading_occurred) {
-  uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
-                    workers()->active_workers() : 1);
+  uint n_workers = workers()->active_workers();
 
   G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
                                         n_workers, class_unloading_occurred);
-  if (G1CollectedHeap::use_parallel_gc_threads()) {
     set_par_threads(n_workers);
     workers()->run_task(&g1_unlink_task);
     set_par_threads(0);
-  } else {
-    g1_unlink_task.work(0);
-  }
 }
 
 void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
                                                      bool process_strings, bool process_symbols) {
   {
-    uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
-                     _g1h->workers()->active_workers() : 1);
+    uint n_workers = _g1h->workers()->active_workers();
     G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
-    if (G1CollectedHeap::use_parallel_gc_threads()) {
       set_par_threads(n_workers);
       workers()->run_task(&g1_unlink_task);
       set_par_threads(0);
-    } else {
-      g1_unlink_task.work(0);
-    }
   }
 
   if (G1StringDedup::is_enabled()) {
     G1StringDedup::unlink(is_alive);
   }

@@ -5169,37 +5136,28 @@
 
   virtual void work(uint worker_id) {
     double start_time = os::elapsedTime();
 
     RedirtyLoggedCardTableEntryClosure cl;
-    if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
       _queue->par_apply_closure_to_all_completed_buffers(&cl);
-    } else {
-      _queue->apply_closure_to_all_completed_buffers(&cl);
-    }
 
     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
     timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
     timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
   }
 };
 
 void G1CollectedHeap::redirty_logged_cards() {
   double redirty_logged_cards_start = os::elapsedTime();
 
-  uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
-                   _g1h->workers()->active_workers() : 1);
+  uint n_workers = _g1h->workers()->active_workers();
 
   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
   dirty_card_queue_set().reset_for_par_iteration();
-  if (use_parallel_gc_threads()) {
     set_par_threads(n_workers);
     workers()->run_task(&redirty_task);
     set_par_threads(0);
-  } else {
-    redirty_task.work(0);
-  }
 
   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
   dcq.merge_bufferlists(&dirty_card_queue_set());
   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
 

@@ -5585,24 +5543,18 @@
   // We also need to do this copying before we process the reference
   // objects discovered by the STW ref processor in case one of these
   // referents points to another object which is also referenced by an
   // object discovered by the STW ref processor.
 
-  assert(!G1CollectedHeap::use_parallel_gc_threads() ||
-           no_of_gc_workers == workers()->active_workers(),
-           "Need to reset active GC workers");
+  assert(no_of_gc_workers == workers()->active_workers(), "Need to reset active GC workers");
 
   set_par_threads(no_of_gc_workers);
   G1ParPreserveCMReferentsTask keep_cm_referents(this,
                                                  no_of_gc_workers,
                                                  _task_queues);
 
-  if (G1CollectedHeap::use_parallel_gc_threads()) {
     workers()->run_task(&keep_cm_referents);
-  } else {
-    keep_cm_referents.work(0);
-  }
 
   set_par_threads(0);
 
   // Closure to test whether a referent is alive.
   G1STWIsAliveClosure is_alive(this);

@@ -5725,25 +5677,19 @@
   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
   hot_card_cache->reset_hot_cache_claimed_index();
   hot_card_cache->set_use_cache(false);
 
   uint n_workers;
-  if (G1CollectedHeap::use_parallel_gc_threads()) {
     n_workers =
       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                      workers()->active_workers(),
                                      Threads::number_of_non_daemon_threads());
     assert(UseDynamicNumberOfGCThreads ||
            n_workers == workers()->total_workers(),
            "If not dynamic should be using all the  workers");
     workers()->set_active_workers(n_workers);
     set_par_threads(n_workers);
-  } else {
-    assert(n_par_threads() == 0,
-           "Should be the original non-parallel value");
-    n_workers = 1;
-  }
 
   G1ParTask g1_par_task(this, _task_queues);
 
   init_for_evac_failure(NULL);
 

@@ -5758,22 +5704,17 @@
     // InitialMark needs claim bits to keep track of the marked-through CLDs.
     if (g1_policy()->during_initial_mark_pause()) {
       ClassLoaderDataGraph::clear_claimed_marks();
     }
 
-    if (G1CollectedHeap::use_parallel_gc_threads()) {
       // The individual threads will set their evac-failure closures.
       if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
       // These tasks use ShareHeap::_process_strong_tasks
       assert(UseDynamicNumberOfGCThreads ||
              workers()->active_workers() == workers()->total_workers(),
              "If not dynamic should be using all the  workers");
       workers()->run_task(&g1_par_task);
-    } else {
-      g1_par_task.set_for_termination(n_workers);
-      g1_par_task.work(0);
-    }
     end_par_time_sec = os::elapsedTime();
 
     // Closing the inner scope will execute the destructor
     // for the StrongRootsScope object. We record the current
     // elapsed time before closing the scope so that time

@@ -6075,26 +6016,13 @@
 
   {
     // Iterate over the dirty cards region list.
     G1ParCleanupCTTask cleanup_task(ct_bs, this);
 
-    if (G1CollectedHeap::use_parallel_gc_threads()) {
       set_par_threads();
       workers()->run_task(&cleanup_task);
       set_par_threads(0);
-    } else {
-      while (_dirty_cards_region_list) {
-        HeapRegion* r = _dirty_cards_region_list;
-        cleanup_task.clear_cards(r);
-        _dirty_cards_region_list = r->get_next_dirty_cards_region();
-        if (_dirty_cards_region_list == r) {
-          // The last region.
-          _dirty_cards_region_list = NULL;
-        }
-        r->set_next_dirty_cards_region(NULL);
-      }
-    }
 #ifndef PRODUCT
     if (G1VerifyCTCleanup || VerifyAfterGC) {
       G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
       heap_region_iterate(&cleanup_verifier);
     }
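
Note (editorial, not part of the webrev): the deleted serial loop above walked _dirty_cards_region_list, a singly linked list in which the last region links to itself rather than to NULL, which is what the "The last region" check detected. The sketch below replays that convention with a toy region type (not HeapRegion) so the removed loop is easier to read.

    // Standalone sketch (not HotSpot code): the self-terminated list walked by
    // the deleted serial loop. The tail region's next pointer refers to the
    // region itself, so "next == this" marks the end and each popped region has
    // its next pointer cleared, exactly as in the removed code above.
    #include <cstddef>
    #include <cstdio>

    struct ToyRegion {              // stands in for HeapRegion
      int id;
      ToyRegion* next_dirty;        // the last region points to itself
    };

    int main() {
      ToyRegion c = { 3, NULL };  c.next_dirty = &c;   // tail: self-linked
      ToyRegion b = { 2, &c };
      ToyRegion a = { 1, &b };

      ToyRegion* dirty_cards_region_list = &a;
      while (dirty_cards_region_list != NULL) {
        ToyRegion* r = dirty_cards_region_list;
        dirty_cards_region_list = r->next_dirty;
        if (dirty_cards_region_list == r) {
          // The last region.
          dirty_cards_region_list = NULL;
        }
        r->next_dirty = NULL;
        std::printf("cleared cards for region %d\n", r->id);
      }
      return 0;
    }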

@@ -6630,11 +6558,10 @@
 }
 
 void G1CollectedHeap::set_par_threads() {
   // Don't change the number of workers.  Use the value previously set
   // in the workgroup.
-  assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
   uint n_workers = workers()->active_workers();
   assert(UseDynamicNumberOfGCThreads ||
            n_workers == workers()->total_workers(),
       "Otherwise should be using the total number of workers");
   if (n_workers == 0) {