1 /*
   2  * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/stringTable.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc_interface/collectedHeap.inline.hpp"
  30 #include "memory/sharedHeap.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/atomic.inline.hpp"
  33 #include "runtime/fprofiler.hpp"
  34 #include "runtime/java.hpp"
  35 #include "services/management.hpp"
  36 #include "utilities/copy.hpp"
  37 #include "utilities/workgroup.hpp"
  38 
  39 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  40 
// Display names for the external-root scanning sub-tasks, used when reporting
// per-phase timings (hence the "(ms)" suffix).  Indexed by the
// SharedHeap::SH_PS_* task ids (see ext_roots_task_str() below), so the entry
// order must match the declaration order of those ids.
static const char* ext_roots_task_strings[SharedHeap::SH_PS_NumElements] = {
      "Thread Roots (ms)",
      "StringTable Roots (ms)",
      "Universe Roots (ms)",
      "JNI Handles Roots (ms)",
      "ObjectSynchronizer Roots (ms)",
      "FlatProfiler Roots (ms)",
      "Management Roots (ms)",
      "SystemDictionary Roots (ms)",
      "CLDG Roots (ms)",
      "JVMTI Roots (ms)",
      "CodeCache Roots (ms)"
};
  54 
  55 const char* SharedHeap::ext_roots_task_str(uint task) {
  56   vmassert(task < ARRAY_SIZE(ext_roots_task_strings), "must be");
  57   return ext_roots_task_strings[task];
  58 }
  59         
// The singleton SharedHeap instance; assigned once, in the constructor.
SharedHeap* SharedHeap::_sh;
  61 
// Constructs the shared heap: records the collector policy, allocates the
// strong-root sub-task bookkeeping, and — for CMS and G1 only — creates and
// initializes the parallel GC worker gang.  Exits the VM if a necessary
// allocation fails.
SharedHeap::SharedHeap(CollectorPolicy* policy_) :
  CollectedHeap(),
  _collector_policy(policy_),
  _strong_roots_scope(NULL),
  _strong_roots_parity(0),
  _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
  _workers(NULL)
{
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  _sh = this;  // _sh is static, should be set only once.
  if (UseConcMarkSweepGC || UseG1GC) {
    // The shared worker gang is only created for the CMS and G1 collectors.
    _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    if (_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _workers->initialize_workers();
    }
  }
}
  85 
  86 int SharedHeap::n_termination() {
  87   return _process_strong_tasks->n_threads();
  88 }
  89 
  90 void SharedHeap::set_n_termination(int t) {
  91   _process_strong_tasks->set_n_threads(t);
  92 }
  93 
  94 bool SharedHeap::heap_lock_held_for_gc() {
  95   Thread* t = Thread::current();
  96   return    Heap_lock->owned_by_self()
  97          || (   (t->is_GC_task_thread() ||  t->is_VM_thread())
  98              && _thread_holds_heap_lock_for_gc);
  99 }
 100 
 101 void SharedHeap::set_par_threads(uint t) {
 102   assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
 103   _n_par_threads = t;
 104   _process_strong_tasks->set_n_threads(t);
 105 }
 106 
#ifdef ASSERT
// Debug-only closure: asserts that each visited oop does not point into the
// part of the heap subject to a partial (scavenging) collection.  Used below
// to verify that code-cache contents will not be moved by a scavenge.
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!Universe::heap()->is_in_partial_collection(*p),
      "Referent should not be scavengable.");  }
  // Not expected to be called with narrow oops here.
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif
 117 
 118 SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const {
 119   return _strong_roots_scope;
 120 }
 121 void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
 122   assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active");
 123   assert(scope != NULL, "Illegal argument");
 124   _strong_roots_scope = scope;
 125 }
 126 void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
 127   assert(_strong_roots_scope == scope, "Wrong scope unregistered");
 128   _strong_roots_scope = NULL;
 129 }
 130 
 131 void SharedHeap::change_strong_roots_parity() {
 132   // Also set the new collection parity.
 133   assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
 134          "Not in range.");
 135   _strong_roots_parity++;
 136   if (_strong_roots_parity == 3) _strong_roots_parity = 1;
 137   assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2,
 138          "Not in range.");
 139 }
 140 
// Opens a strong-roots scope on 'heap'.  When 'activate' is true (recorded by
// the MarkScope base as _active), this registers the scope with the heap,
// bumps the strong-roots claim parity, and resets the StringTable's parallel
// claim index so its buckets can be claimed afresh.
SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
  : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
{
  if (_active) {
    _sh->register_strong_roots_scope(this);
    _sh->change_strong_roots_parity();
    // Zero the claimed high water mark in the StringTable
    StringTable::clear_parallel_claimed_index();
  }
}
 151 
 152 SharedHeap::StrongRootsScope::~StrongRootsScope() {
 153   if (_active) {
 154     _sh->unregister_strong_roots_scope(this);
 155   }
 156 }
 157 
// Monitor backing the worker "done with threads" rendezvous below; created at
// static-initialization time and used without safepoint checks.
Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false, Monitor::_safepoint_check_never);
 159 
// Called by each GC worker once it will publish no more strong CLDs/nmethods
// from thread stacks (see process_roots).  Atomically counts arrivals; the
// last of the n_workers to arrive notifies any thread blocked in
// wait_until_all_workers_done_with_threads().
void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
  // The Thread work barrier is only needed by G1 Class Unloading.
  // No need to use the barrier if this is single-threaded code.
  if (UseG1GC && ClassUnloadingWithConcurrentMark && n_workers > 0) {
    // new_value is this worker's 1-based arrival rank.
    uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
    if (new_value == n_workers) {
      // This thread is last. Notify the others.
      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
      _lock->notify_all();
    }
  }
}
 172 
// Blocks until all n_workers GC workers have called
// mark_worker_done_with_threads().  Takes a fast path (no monitor) when the
// count has already reached n_workers or the work is single-threaded.
void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
  assert(UseG1GC,                          "Currently only used by G1");
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  // No need to use the barrier if this is single-threaded code.
  if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    // Re-check the count under the lock; the last worker notifies while
    // holding the same monitor.
    while ((uint)_n_workers_done_with_threads != n_workers) {
      _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
    }
  }
}
 185 
 186 void SharedHeap::process_roots(bool activate_scope,
 187                                ScanningOption so,
 188                                OopClosure* strong_roots,
 189                                OopClosure* weak_roots,
 190                                CLDClosure* strong_cld_closure,
 191                                CLDClosure* weak_cld_closure,
 192                                CodeBlobClosure* code_roots,
 193                                PhaseTimeData* phase_durations) {
 194   StrongRootsScope srs(this, activate_scope);
 195 
 196   // General roots.
 197   assert(_strong_roots_parity != 0, "must have called prologue code");
 198   assert(code_roots != NULL, "code root closure should always be set");
 199   // _n_termination for _process_strong_tasks should be set up stream
 200   // in a method not running in a GC worker.  Otherwise the GC worker
 201   // could be trying to change the termination condition while the task
 202   // is executing in another GC worker.
 203 
 204   // Iterating over the CLDG and the Threads are done early to allow G1 to
 205   // first process the strong CLDs and nmethods and then, after a barrier,
 206   // let the thread process the weak CLDs and nmethods.
 207 
 208   {
 209     TrackPhaseTime x(phase_durations, SH_PS_ClassLoaderDataGraph_oops_do);
 210     if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
 211       ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
 212     }
 213   }
 214 
 215   // Some CLDs contained in the thread frames should be considered strong.
 216   // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
 217   CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
 218   // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
 219   CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
 220 
 221   {
 222     TrackPhaseTime x(phase_durations, SH_PS_Threads_oops_do);
 223     Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
 224   }
 225 
 226   // This is the point where this worker thread will not find more strong CLDs/nmethods.
 227   // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
 228   active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());
 229 
 230   {
 231     TrackPhaseTime x(phase_durations, SH_PS_Universe_oops_do);
 232     if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
 233       Universe::oops_do(strong_roots);
 234     }
 235   }
 236   {
 237     TrackPhaseTime x(phase_durations, SH_PS_JNIHandles_oops_do);
 238     // Global (strong) JNI handles
 239     if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
 240       JNIHandles::oops_do(strong_roots);
 241   }
 242 
 243   {
 244     TrackPhaseTime x(phase_durations, SH_PS_ObjectSynchronizer_oops_do);
 245     if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
 246       ObjectSynchronizer::oops_do(strong_roots);
 247   }
 248   {
 249     TrackPhaseTime x(phase_durations, SH_PS_FlatProfiler_oops_do);
 250     if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
 251       FlatProfiler::oops_do(strong_roots);
 252   }
 253   {
 254     TrackPhaseTime x(phase_durations, SH_PS_Management_oops_do);
 255     if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
 256       Management::oops_do(strong_roots);
 257   }
 258   {
 259     TrackPhaseTime x(phase_durations, SH_PS_jvmti_oops_do);
 260     if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
 261       JvmtiExport::oops_do(strong_roots);
 262   }
 263   {
 264     TrackPhaseTime x(phase_durations, SH_PS_SystemDictionary_oops_do);
 265     if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
 266       SystemDictionary::roots_oops_do(strong_roots, weak_roots);
 267     }
 268   }
 269 
 270   {
 271    TrackPhaseTime x(phase_durations, SH_PS_StringTable_oops_do);
 272     // All threads execute the following. A specific chunk of buckets
 273     // from the StringTable are the individual tasks.
 274     if (weak_roots != NULL) {
 275       if (CollectedHeap::use_parallel_gc_threads()) {
 276         StringTable::possibly_parallel_oops_do(weak_roots);
 277       } else {
 278         StringTable::oops_do(weak_roots);
 279       }
 280     }
 281   }
 282 
 283   {
 284     TrackPhaseTime x(phase_durations, SH_PS_CodeCache_oops_do);
 285     if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
 286       if (so & SO_ScavengeCodeCache) {
 287         assert(code_roots != NULL, "must supply closure for code cache");
 288 
 289         // We only visit parts of the CodeCache when scavenging.
 290         CodeCache::scavenge_root_nmethods_do(code_roots);
 291       }
 292       if (so & SO_AllCodeCache) {
 293         assert(code_roots != NULL, "must supply closure for code cache");
 294 
 295         // CMSCollector uses this to do intermediate-strength collections.
 296         // We scan the entire code cache, since CodeCache::do_unloading is not called.
 297         CodeCache::blobs_do(code_roots);
 298       }
 299       // Verify that the code cache contents are not subject to
 300       // movement by a scavenging collection.
 301       DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
 302       DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
 303     }
 304   }
 305 
 306   _process_strong_tasks->all_tasks_completed();
 307 }
 308 
 309 void SharedHeap::process_all_roots(bool activate_scope,
 310                                    ScanningOption so,
 311                                    OopClosure* roots,
 312                                    CLDClosure* cld_closure,
 313                                    CodeBlobClosure* code_closure,
 314                                    PhaseTimeData* phase_durations) {
 315   process_roots(activate_scope, so,
 316                 roots, roots,
 317                 cld_closure, cld_closure,
 318                 code_closure,
 319                 phase_durations);
 320 }
 321 
 322 void SharedHeap::process_strong_roots(bool activate_scope,
 323                                       ScanningOption so,
 324                                       OopClosure* roots,
 325                                       CLDClosure* cld_closure,
 326                                       CodeBlobClosure* code_closure,
 327                                       PhaseTimeData* phase_durations) {
 328   process_roots(activate_scope, so,
 329                 roots, NULL,
 330                 cld_closure, NULL,
 331                 code_closure,
 332                 phase_durations);
 333 }
 334 
 335 
// Predicate closure that accepts every object.  Used below as the
// BoolObjectClosure argument to JNIHandles::weak_oops_do.
class AlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static AlwaysTrueClosure always_true;
 341 
 342 void SharedHeap::process_weak_roots(OopClosure* root_closure) {
 343   // Global (weak) JNI handles
 344   JNIHandles::weak_oops_do(&always_true, root_closure);
 345 }
 346 
 347 void SharedHeap::set_barrier_set(BarrierSet* bs) {
 348   _barrier_set = bs;
 349   // Cached barrier set for fast access in oops
 350   oopDesc::set_bs(bs);
 351 }
 352 
// Post-initialization hook: runs the superclass's post-initialization first,
// then this heap's reference-processing setup.
void SharedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();
}
 357 
// Intentionally empty: SharedHeap itself has no reference-processing state to
// set up; concrete heaps supply their own implementation.
void SharedHeap::ref_processing_init() {}
 359 
 360 // Some utilities.
 361 void SharedHeap::print_size_transition(outputStream* out,
 362                                        size_t bytes_before,
 363                                        size_t bytes_after,
 364                                        size_t capacity) {
 365   out->print(" " SIZE_FORMAT "%s->" SIZE_FORMAT "%s(" SIZE_FORMAT "%s)",
 366              byte_size_in_proper_unit(bytes_before),
 367              proper_unit_for_byte_size(bytes_before),
 368              byte_size_in_proper_unit(bytes_after),
 369              proper_unit_for_byte_size(bytes_after),
 370              byte_size_in_proper_unit(capacity),
 371              proper_unit_for_byte_size(capacity));
 372 }