/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/java.hpp"
#include "services/management.hpp"
#include "utilities/copy.hpp"
#include "utilities/workgroup.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

SharedHeap* SharedHeap::_sh;

// The set of potentially parallel tasks in root scanning.
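// Each task is claimed at most once per root-scanning pass via
// SubTasksDone::is_task_claimed(), so in a parallel phase exactly one
// worker thread executes each task (see process_roots() below).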
enum SH_process_roots_tasks {
  SH_PS_Universe_oops_do,
  SH_PS_JNIHandles_oops_do,
  SH_PS_ObjectSynchronizer_oops_do,
  SH_PS_FlatProfiler_oops_do,
  SH_PS_Management_oops_do,
  SH_PS_SystemDictionary_oops_do,
  SH_PS_ClassLoaderDataGraph_oops_do,
  SH_PS_jvmti_oops_do,
  SH_PS_CodeCache_oops_do,
  // Leave this one last.
  SH_PS_NumElements
};

SharedHeap::SharedHeap(CollectorPolicy* policy_) :
  CollectedHeap(),
  _collector_policy(policy_),
  _strong_roots_scope(NULL),
  _strong_roots_parity(0),
  _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
  _workers(NULL)
{
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  _sh = this;  // _sh is static, should be set only once.
  if (UseConcMarkSweepGC || UseG1GC) {
    _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    if (_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _workers->initialize_workers();
    }
  }
}

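// The number of threads participating in termination is delegated to the
// SubTasksDone instance that also tracks the root-scanning tasks.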
int SharedHeap::n_termination() {
  return _process_strong_tasks->n_threads();
}

void SharedHeap::set_n_termination(int t) {
  _process_strong_tasks->set_n_threads(t);
}

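// The Heap_lock counts as held for GC if the current thread owns it
// directly, or if the current thread is a GC or VM thread and the lock
// was acquired on its behalf.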
bool SharedHeap::heap_lock_held_for_gc() {
  Thread* t = Thread::current();
  return    Heap_lock->owned_by_self()
         || (   (t->is_GC_task_thread() || t->is_VM_thread())
             && _thread_holds_heap_lock_for_gc);
}

void SharedHeap::set_par_threads(uint t) {
  assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
  _n_par_threads = t;
  _process_strong_tasks->set_n_threads(t);
}

#ifdef ASSERT
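// Debug-only closure used to verify that an oop does not point into the
// part of the heap that a scavenging (partial) collection would move.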
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!Universe::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const {
  return _strong_roots_scope;
}

void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
  assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active");
  assert(scope != NULL, "Illegal argument");
  _strong_roots_scope = scope;
}

void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
  assert(_strong_roots_scope == scope, "Wrong scope unregistered");
  _strong_roots_scope = NULL;
}

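// The strong-roots parity alternates between 1 and 2 across passes. Each
// thread records the parity under which its stack was last claimed, so
// parallel workers scan every thread's stack exactly once per pass.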
void SharedHeap::change_strong_roots_parity() {
  // Advance to the new collection parity.
  assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
         "Not in range.");
  _strong_roots_parity++;
  if (_strong_roots_parity == 3) _strong_roots_parity = 1;
  assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2,
         "Not in range.");
}

SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
  : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
{
  if (_active) {
    _sh->register_strong_roots_scope(this);
    _sh->change_strong_roots_parity();
    // Zero the claimed high water mark in the StringTable.
    StringTable::clear_parallel_claimed_index();
  }
}

SharedHeap::StrongRootsScope::~StrongRootsScope() {
  if (_active) {
    _sh->unregister_strong_roots_scope(this);
  }
}

Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false, Monitor::_safepoint_check_never);

void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
  // The Thread work barrier is only needed by G1 Class Unloading.
  // No need to use the barrier if this is single-threaded code.
  if (UseG1GC && ClassUnloadingWithConcurrentMark && n_workers > 0) {
    uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
    if (new_value == n_workers) {
      // This thread is last. Notify the others.
      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
      _lock->notify_all();
    }
  }
}

void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
  assert(UseG1GC,                          "Currently only used by G1");
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  // No need to use the barrier if this is single-threaded code.
  if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    while ((uint)_n_workers_done_with_threads != n_workers) {
      _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
    }
  }
}
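
// Together, mark_worker_done_with_threads() and
// wait_until_all_workers_done_with_threads() form a per-scope barrier:
// G1 class unloading must not start processing the weak CLDs/nmethods
// until every worker has finished scanning the thread stacks.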

void SharedHeap::process_roots(bool activate_scope,
                               ScanningOption so,
                               OopClosure* strong_roots,
                               OopClosure* weak_roots,
                               CLDClosure* strong_cld_closure,
                               CLDClosure* weak_cld_closure,
                               CodeBlobClosure* code_roots) {
  StrongRootsScope srs(this, activate_scope);

  // General roots.
  assert(_strong_roots_parity != 0, "must have called prologue code");
  assert(code_roots != NULL, "code root closure should always be set");
  // _n_termination for _process_strong_tasks should be set upstream,
  // in a method not running in a GC worker.  Otherwise the GC worker
  // could be trying to change the termination condition while the task
  // is executing in another GC worker.

  // Iterating over the CLDG and the Threads is done early to allow G1 to
  // first process the strong CLDs and nmethods and then, after a barrier,
  // let the worker threads process the weak CLDs and nmethods.

  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
  }

  // Some CLDs contained in the thread frames should be considered strong.
  // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
  CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway.
  CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

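  // Each thread's stack is claimed under the current strong-roots parity,
  // so it is scanned by exactly one worker even when run in parallel.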
  Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);

  // This is the point where this worker thread will not find more strong CLDs/nmethods.
  // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
  active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());

  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
    Universe::oops_do(strong_roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do)) {
    JNIHandles::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(SH_PS_ObjectSynchronizer_oops_do)) {
    ObjectSynchronizer::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do)) {
    FlatProfiler::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do)) {
    Management::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do)) {
    JvmtiExport::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  }

  // All threads execute the following. The individual tasks are specific
  // chunks of buckets from the StringTable.
  if (weak_roots != NULL) {
    if (CollectedHeap::use_parallel_gc_threads()) {
      StringTable::possibly_parallel_oops_do(weak_roots);
    } else {
      StringTable::oops_do(weak_roots);
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
    if (so & SO_ScavengeCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // We only visit parts of the CodeCache when scavenging.
      CodeCache::scavenge_root_nmethods_do(code_roots);
    }
    if (so & SO_AllCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // CMSCollector uses this to do intermediate-strength collections.
      // We scan the entire code cache, since CodeCache::do_unloading is not called.
      CodeCache::blobs_do(code_roots);
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }

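  // Report that this worker is done claiming tasks; the claim state is
  // reset for the next pass once all workers have reported.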
  _process_strong_tasks->all_tasks_completed();
}
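
// process_all_roots() applies the same closures to both the strong and
// the weak roots, while process_strong_roots() passes NULL for the weak
// closures so that only the strong roots are visited.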

void SharedHeap::process_all_roots(bool activate_scope,
                                   ScanningOption so,
                                   OopClosure* roots,
                                   CLDClosure* cld_closure,
                                   CodeBlobClosure* code_closure) {
  process_roots(activate_scope, so,
                roots, roots,
                cld_closure, cld_closure,
                code_closure);
}

void SharedHeap::process_strong_roots(bool activate_scope,
                                      ScanningOption so,
                                      OopClosure* roots,
                                      CLDClosure* cld_closure,
                                      CodeBlobClosure* code_closure) {
  process_roots(activate_scope, so,
                roots, NULL,
                cld_closure, NULL,
                code_closure);
}

class AlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static AlwaysTrueClosure always_true;

void SharedHeap::process_weak_roots(OopClosure* root_closure) {
  // Global (weak) JNI handles
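  // always_true reports every object as live, so no weak handles are
  // cleared here; root_closure is simply applied to each of them.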
  JNIHandles::weak_oops_do(&always_true, root_closure);
}

void SharedHeap::set_barrier_set(BarrierSet* bs) {
  _barrier_set = bs;
  // Cached barrier set for fast access in oops.
  oopDesc::set_bs(bs);
}

void SharedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();
}

void SharedHeap::ref_processing_init() {}

// Some utilities.
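// Prints a size transition of the form " before->after(capacity)", e.g.
// " 8192K->4096K(16384K)", choosing a proper unit for each value.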
void SharedHeap::print_size_transition(outputStream* out,
                                       size_t bytes_before,
                                       size_t bytes_after,
                                       size_t capacity) {
  out->print(" " SIZE_FORMAT "%s->" SIZE_FORMAT "%s(" SIZE_FORMAT "%s)",
             byte_size_in_proper_unit(bytes_before),
             proper_unit_for_byte_size(bytes_before),
             byte_size_in_proper_unit(bytes_after),
             proper_unit_for_byte_size(bytes_after),
             byte_size_in_proper_unit(capacity),
             proper_unit_for_byte_size(capacity));
}