/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/java.hpp"
#include "services/management.hpp"
#include "utilities/copy.hpp"
#include "utilities/workgroup.hpp"

SharedHeap* SharedHeap::_sh;

// The set of potentially parallel tasks in strong root scanning.
enum SH_process_strong_roots_tasks {
  SH_PS_Universe_oops_do,
  SH_PS_JNIHandles_oops_do,
  SH_PS_ObjectSynchronizer_oops_do,
  SH_PS_FlatProfiler_oops_do,
  SH_PS_Management_oops_do,
  SH_PS_SystemDictionary_oops_do,
  SH_PS_jvmti_oops_do,
  SH_PS_StringTable_oops_do,
  SH_PS_CodeCache_oops_do,
  // Leave this one last.
  SH_PS_NumElements
};

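// Each SH_PS_* enumerator above names one serial root group.  During a
// parallel root scan every worker calls process_strong_roots(); the
// SubTasksDone instance created below, sized by SH_PS_NumElements, lets
// exactly one worker claim each group via is_task_claimed(), while
// thread-stack scanning is partitioned separately across the workers.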
SharedHeap::SharedHeap(CollectorPolicy* policy_) :
  CollectedHeap(),
  _collector_policy(policy_),
  _rem_set(NULL),
  _strong_roots_parity(0),
  _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
  _workers(NULL)
{
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  _sh = this;  // _sh is static; it should be set only once.
  if ((UseParNewGC ||
      (UseConcMarkSweepGC && (CMSParallelInitialMarkEnabled ||
                              CMSParallelRemarkEnabled)) ||
       UseG1GC) &&
      ParallelGCThreads > 0) {
    _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    if (_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _workers->initialize_workers();
    }
  }
}

int SharedHeap::n_termination() {
  return _process_strong_tasks->n_threads();
}

void SharedHeap::set_n_termination(int t) {
  _process_strong_tasks->set_n_threads(t);
}

bool SharedHeap::heap_lock_held_for_gc() {
  Thread* t = Thread::current();
  return    Heap_lock->owned_by_self()
         || (   (t->is_GC_task_thread() ||  t->is_VM_thread())
             && _thread_holds_heap_lock_for_gc);
}

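// Record how many parallel GC worker threads will take part in the next
// parallel operation.  The count is mirrored into _process_strong_tasks so
// that task claiming and completion tracking see the same number of threads.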
void SharedHeap::set_par_threads(uint t) {
  assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
  _n_par_threads = t;
  _process_strong_tasks->set_n_threads(t);
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!Universe::heap()->is_in_partial_collection(*p),
      "Referent should not be scavengable.");  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

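// The strong roots parity alternates between 1 and 2 on successive
// root-scanning passes (it is 0 only before the first pass).  Each thread
// records the parity under which its oops were last claimed, which is how
// Threads::possibly_parallel_oops_do() tells whether a given thread's stack
// has already been visited in the current pass.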
void SharedHeap::change_strong_roots_parity() {
  // Also set the new collection parity.
  assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
         "Not in range.");
  _strong_roots_parity++;
  if (_strong_roots_parity == 3) _strong_roots_parity = 1;
  assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2,
         "Not in range.");
}

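// A StrongRootsScope brackets one root-scanning pass.  Constructing it with
// activate == true advances the parity, invalidating claims made during the
// previous pass.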
SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
  : MarkScope(activate)
{
  if (_active) {
    outer->change_strong_roots_parity();
  }
}

SharedHeap::StrongRootsScope::~StrongRootsScope() {
  // nothing particular
}

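// Visit all strong roots exactly once.  Serial root groups (Universe, JNI
// handles, ObjectSynchronizer, and so on) are claimed through
// _process_strong_tasks so that only one worker scans each group; thread
// stacks are distributed across workers via possibly_parallel_oops_do().
//
// A minimal sketch of a caller, for illustration only; SomeRootOopClosure is
// a hypothetical OopClosure subclass, and the use of KlassToOopClosure and
// the flag values are assumptions rather than anything prescribed here:
//
//   SomeRootOopClosure roots;            // hypothetical OopClosure
//   CodeBlobToOopClosure code_roots(&roots, /*do_marking=*/ false);
//   KlassToOopClosure klass_closure(&roots);
//   heap->process_strong_roots(true   /* activate StrongRootsScope */,
//                              false  /* is_scavenging */,
//                              SharedHeap::SO_AllClasses,
//                              &roots, &code_roots, &klass_closure);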
void SharedHeap::process_strong_roots(bool activate_scope,
                                      bool is_scavenging,
                                      ScanningOption so,
                                      OopClosure* roots,
                                      CodeBlobClosure* code_roots,
                                      KlassClosure* klass_closure) {
  StrongRootsScope srs(this, activate_scope);

  // General strong roots.
  assert(_strong_roots_parity != 0, "must have called prologue code");
  // _n_termination for _process_strong_tasks should be set up upstream,
  // in a method not running in a GC worker.  Otherwise one GC worker
  // could be trying to change the termination condition while the task
  // is executing in another GC worker.
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
    Universe::oops_do(roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
    JNIHandles::oops_do(roots);
  // All threads execute this; the individual threads are task groups.
  CLDToOopClosure roots_from_clds(roots);
  CLDToOopClosure* roots_from_clds_p = (is_scavenging ? NULL : &roots_from_clds);
  if (ParallelGCThreads > 0) {
    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots);
  } else {
    Threads::oops_do(roots, roots_from_clds_p, code_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
    ObjectSynchronizer::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
    FlatProfiler::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
    Management::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
    JvmtiExport::oops_do(roots);

  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
    if (so & SO_AllClasses) {
      SystemDictionary::oops_do(roots);
      ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging);
    } else if (so & SO_SystemClasses) {
      SystemDictionary::always_strong_oops_do(roots);
      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging);
    } else {
      fatal("We should always have selected either SO_AllClasses or SO_SystemClasses");
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
    if (so & SO_Strings) {
      StringTable::oops_do(roots);
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
    if (so & SO_CodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      if (is_scavenging) {
        // We only visit parts of the CodeCache when scavenging.
        CodeCache::scavenge_root_nmethods_do(code_roots);
      } else {
        // CMSCollector uses this to do intermediate-strength collections.
        // We scan the entire code cache, since CodeCache::do_unloading is not called.
        CodeCache::blobs_do(code_roots);
      }
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }

  _process_strong_tasks->all_tasks_completed();
}

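// A BoolObjectClosure that reports every object as live, so the weak JNI
// handles visited in process_weak_roots() below are all passed to the root
// closure rather than being treated as dead.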
class AlwaysTrueClosure: public BoolObjectClosure {
public:
  void do_object(oop p) { ShouldNotReachHere(); }
  bool do_object_b(oop p) { return true; }
};
static AlwaysTrueClosure always_true;

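// Visit the weak roots: weak global JNI handles, all code cache blobs, and
// the interned string table.  Typically called after the strong roots have
// been processed.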
void SharedHeap::process_weak_roots(OopClosure* root_closure,
                                    CodeBlobClosure* code_roots) {
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, root_closure);

  CodeCache::blobs_do(code_roots);
  StringTable::oops_do(root_closure);
}

void SharedHeap::set_barrier_set(BarrierSet* bs) {
  _barrier_set = bs;
  // Cached barrier set for fast access in oops
  oopDesc::set_bs(bs);
}

void SharedHeap::post_initialize() {
  ref_processing_init();
}

void SharedHeap::ref_processing_init() {}

// Some utilities.
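// Prints a size transition such as " 8192K->2048K(65536K)", with each value
// expressed in the nearest whole unit.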
void SharedHeap::print_size_transition(outputStream* out,
                                       size_t bytes_before,
                                       size_t bytes_after,
                                       size_t capacity) {
  out->print(" " SIZE_FORMAT "%s->" SIZE_FORMAT "%s(" SIZE_FORMAT "%s)",
             byte_size_in_proper_unit(bytes_before),
             proper_unit_for_byte_size(bytes_before),
             byte_size_in_proper_unit(bytes_after),
             proper_unit_for_byte_size(bytes_after),
             byte_size_in_proper_unit(capacity),
             proper_unit_for_byte_size(capacity));
}