/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/g1SerialCollector.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/referenceProcessor.hpp"

G1SerialCollector::G1SerialCollector(G1FullGCScope* scope,
                                     ReferenceProcessor* reference_processor) :
    _scope(scope),
    _reference_processor(reference_processor),
    _is_alive_mutator(_reference_processor, NULL),
    _mt_discovery_mutator(_reference_processor, false) {
  // Temporarily make discovery by the STW ref processor single threaded (non-MT)
  // and clear the STW ref processor's _is_alive_non_header field. The mutators
  // restore the original values when this collector is destroyed.
}

void G1SerialCollector::prepare_collection() {
  _reference_processor->enable_discovery();
  _reference_processor->setup_policy(_scope->should_clear_soft_refs());
}

void G1SerialCollector::complete_collection() {
  // Enqueue any discovered reference objects that have
  // not been removed from the discovered lists.
  _reference_processor->enqueue_discovered_references();

  // Iterate the heap and rebuild the remembered sets.
  rebuild_remembered_sets();
}

void G1SerialCollector::collect() {
  // Do the actual collection work.
  G1MarkSweep::invoke_at_safepoint(_reference_processor, _scope->should_clear_soft_refs());
}

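// Clears each region's remembered set and the corresponding card table
// entries after the full collection, so that the remembered sets can be
// rebuilt from scratch.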
class PostMCRemSetClearClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
    _g1h(g1h), _mr_bs(mr_bs) {}

  bool doHeapRegion(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();

    _g1h->reset_gc_time_stamps(r);

    if (r->is_continues_humongous()) {
      // We'll assert that the strong code root list and RSet are empty
      assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
      assert(hrrs->occupied() == 0, "RSet should be empty");
    } else {
      hrrs->clear();
    }
    // You might think here that we could clear just the cards
    // corresponding to the used region.  But no: if we leave a dirty card
    // in a region we might allocate into, then it would prevent that card
    // from being enqueued, and cause it to be missed.
    // Re: the performance cost: we shouldn't be doing full GC anyway!
    _mr_bs->clear(MemRegion(r->bottom(), r->end()));

    return false;
  }
};

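// Rebuilds the remembered set entries for the objects in a region by
// iterating the region's oops with an UpdateRSOopClosure.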
class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  G1CollectedHeap*   _g1h;
  UpdateRSOopClosure _cl;
public:
  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, uint worker_i = 0) :
    _g1h(g1),
    _cl(g1->g1_rem_set(), worker_i)
  { }

  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      _cl.set_from(r);
      r->oop_iterate(&_cl);
    }
    return false;
  }
};

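// Gang task that rebuilds the remembered sets in parallel: each worker
// claims regions through the HeapRegionClaimer and applies
// RebuildRSOutOfRegionClosure to the regions it claims.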
class ParRebuildRSTask: public AbstractGangTask {
  G1CollectedHeap* _g1;
  HeapRegionClaimer _hrclaimer;

public:
  ParRebuildRSTask(G1CollectedHeap* g1) :
      AbstractGangTask("ParRebuildRSTask"), _g1(g1), _hrclaimer(g1->workers()->active_workers()) {}

  void work(uint worker_id) {
    RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
    _g1->heap_region_par_iterate(&rebuild_rs, worker_id, &_hrclaimer);
  }
};

void G1SerialCollector::rebuild_remembered_sets() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // First clear the stale remembered sets.
  PostMCRemSetClearClosure rs_clear(g1h, g1h->g1_barrier_set());
  g1h->heap_region_iterate(&rs_clear);

  // Rebuild the remembered sets of all regions.
  uint n_workers = AdaptiveSizePolicy::calc_active_workers(g1h->workers()->total_workers(),
                                                           g1h->workers()->active_workers(),
                                                           Threads::number_of_non_daemon_threads());
  g1h->workers()->update_active_workers(n_workers);
  log_info(gc, task)("Using %u workers of %u to rebuild remembered set", n_workers, g1h->workers()->total_workers());

  ParRebuildRSTask rebuild_rs_task(g1h);
  g1h->workers()->run_task(&rebuild_rs_task);
}