/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/java.hpp"
#include "services/management.hpp"
#include "utilities/copy.hpp"
#include "utilities/workgroup.hpp"

SharedHeap* SharedHeap::_sh;

// The set of potentially parallel tasks in strong root scanning.
enum SH_process_strong_roots_tasks {
  SH_PS_Universe_oops_do,
  SH_PS_JNIHandles_oops_do,
  SH_PS_ObjectSynchronizer_oops_do,
  SH_PS_FlatProfiler_oops_do,
  SH_PS_Management_oops_do,
  SH_PS_SystemDictionary_oops_do,
  SH_PS_jvmti_oops_do,
  SH_PS_CodeCache_oops_do,
  // Leave this one last.
  SH_PS_NumElements
};

SharedHeap::SharedHeap(CollectorPolicy* policy_) :
  CollectedHeap(),
  _collector_policy(policy_),
  _perm_gen(NULL), _rem_set(NULL),
  _strong_roots_parity(0),
  _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
  _workers(NULL)
{
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  _sh = this;  // _sh is static, should be set only once.
  if ((UseParNewGC ||
      (UseConcMarkSweepGC && (CMSParallelInitialMarkEnabled ||
                              CMSParallelRemarkEnabled)) ||
       UseG1GC) &&
      ParallelGCThreads > 0) {
    _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    if (_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _workers->initialize_workers();
    }
  }
}

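// The number of threads participating in termination of the parallel
// strong-root tasks is tracked by the SubTasksDone instance.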
int SharedHeap::n_termination() {
  return _process_strong_tasks->n_threads();
}

void SharedHeap::set_n_termination(int t) {
  _process_strong_tasks->set_n_threads(t);
}

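// The Heap_lock is considered held for GC if the current thread owns it
// directly, or if it is a GC task thread or the VM thread and the lock is
// recorded as being held on the GC's behalf.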
bool SharedHeap::heap_lock_held_for_gc() {
  Thread* t = Thread::current();
  return    Heap_lock->owned_by_self()
         || (   (t->is_GC_task_thread() ||  t->is_VM_thread())
             && _thread_holds_heap_lock_for_gc);
}

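// Record the number of parallel GC threads for the current collection and
// propagate it to the strong-root task set.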
void SharedHeap::set_par_threads(uint t) {
  assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
  _n_par_threads = t;
  _process_strong_tasks->set_n_threads(t);
}

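// Verification closure: asserts that every non-NULL referent is in the
// permanent generation.  Used below to check StringTable contents when
// JavaObjectsInPerm is set.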
class AssertIsPermClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertIsPermClosure assert_is_perm_closure;

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!Universe::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

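// Advance the strong-roots parity.  After initialization the parity cycles
// between 1 and 2, so each root-scanning pass gets a value distinct from the
// previous one.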
void SharedHeap::change_strong_roots_parity() {
  // Also set the new collection parity.
  assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
         "Not in range.");
  _strong_roots_parity++;
  if (_strong_roots_parity == 3) _strong_roots_parity = 1;
  assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2,
         "Not in range.");
}

SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
  : MarkScope(activate)
{
  if (_active) {
    outer->change_strong_roots_parity();
    // Zero the claimed high water mark in the StringTable
    StringTable::clear_parallel_claimed_index();
  }
}

SharedHeap::StrongRootsScope::~StrongRootsScope() {
  // nothing particular
}

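// Process all strong roots.  Each SH_PS_* task is claimed by at most one
// worker via _process_strong_tasks; the sections noted below as executed by
// all threads are instead parallelized internally.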
void SharedHeap::process_strong_roots(bool activate_scope,
                                      bool collecting_perm_gen,
                                      ScanningOption so,
                                      OopClosure* roots,
                                      CodeBlobClosure* code_roots,
                                      OopsInGenClosure* perm_blk,
                                      bool manages_code_roots) {
  StrongRootsScope srs(this, activate_scope);
  // General strong roots.
  assert(_strong_roots_parity != 0, "must have called prologue code");
  // _n_termination for _process_strong_tasks should be set upstream,
  // in a method not running in a GC worker.  Otherwise the GC worker
  // could be trying to change the termination condition while the task
  // is executing in another GC worker.
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
    Universe::oops_do(roots);
    // Consider perm-gen discovered lists to be strong.
    perm_gen()->ref_processor()->weak_oops_do(roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
    JNIHandles::oops_do(roots);

  // All threads execute this; the individual threads are task groups.
  if (CollectedHeap::use_parallel_gc_threads()) {
    Threads::possibly_parallel_oops_do(roots, code_roots);
  } else {
    Threads::oops_do(roots, code_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
    ObjectSynchronizer::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
    FlatProfiler::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
    Management::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
    JvmtiExport::oops_do(roots);

  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
    if (so & SO_AllClasses) {
      SystemDictionary::oops_do(roots);
    } else if (so & SO_SystemClasses) {
      SystemDictionary::always_strong_oops_do(roots);
    }
  }

  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  if (so & SO_Strings || (!collecting_perm_gen && !JavaObjectsInPerm)) {
    if (CollectedHeap::use_parallel_gc_threads()) {
      StringTable::possibly_parallel_oops_do(roots);
    } else {
      StringTable::oops_do(roots);
    }
  }
  if (JavaObjectsInPerm) {
    // Verify the string table contents are in the perm gen
    if (CollectedHeap::use_parallel_gc_threads()) {
      NOT_PRODUCT(StringTable::possibly_parallel_oops_do(&assert_is_perm_closure));
    } else {
      NOT_PRODUCT(StringTable::oops_do(&assert_is_perm_closure));
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
    if (so & SO_CodeCache) {
      // (Currently, CMSCollector uses this to do intermediate-strength collections.)
      assert(collecting_perm_gen, "scanning all of code cache");
      assert(code_roots != NULL, "must supply closure for code cache");
      if (code_roots != NULL) {
        CodeCache::blobs_do(code_roots);
      }
    } else if (so & (SO_SystemClasses|SO_AllClasses)) {
      if (!manages_code_roots && !collecting_perm_gen) {
        // If we are collecting from class statics, but we are not going to
        // visit all of the CodeCache, collect from the non-perm roots if any.
        // This makes the code cache function temporarily as a source of strong
        // roots for oops, until the next major collection.
        //
        // If collecting_perm_gen is true, we require that this phase will call
        // CodeCache::do_unloading.  This will kill off nmethods with expired
        // weak references, such as stale invokedynamic targets.
        CodeCache::scavenge_root_nmethods_do(code_roots);
      }
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }

  if (!collecting_perm_gen) {
    // All threads perform this; coordination is handled internally.
    rem_set()->younger_refs_iterate(perm_gen(), perm_blk);
  }
  _process_strong_tasks->all_tasks_completed();
}

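// A BoolObjectClosure that treats every object as live; passed to
// JNIHandles::weak_oops_do below so that all weak handles are visited.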
class AlwaysTrueClosure: public BoolObjectClosure {
public:
  void do_object(oop p) { ShouldNotReachHere(); }
  bool do_object_b(oop p) { return true; }
};
static AlwaysTrueClosure always_true;

class SkipAdjustingSharedStrings: public OopClosure {
  OopClosure* _clo;
public:
  SkipAdjustingSharedStrings(OopClosure* clo) : _clo(clo) {}

  virtual void do_oop(oop* p) {
    oop o = (*p);
    if (!o->is_shared_readwrite()) {
      _clo->do_oop(p);
    }
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

// Unmarked shared Strings in the StringTable (which got there due to
// being in the constant pools of as-yet unloaded shared classes) were
// not marked and therefore did not have their mark words preserved.
// These entries are also deliberately not purged from the string
// table during unloading of unmarked strings. If an identity hash
// code was computed for any of these objects, it will not have been
// cleared to zero during the forwarding process or by the
// RecursiveAdjustSharedObjectClosure, and will be confused by the
// adjusting process as a forwarding pointer. We need to skip
// forwarding StringTable entries which contain unmarked shared
// Strings. Actually, since shared strings won't be moving, we can
// just skip adjusting any shared entries in the string table.

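// Process weak roots: weak JNI handles, all code blobs, and the interned
// string table (skipping shared read-write strings when shared spaces are
// in use).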
void SharedHeap::process_weak_roots(OopClosure* root_closure,
                                    CodeBlobClosure* code_roots,
                                    OopClosure* non_root_closure) {
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, root_closure);

  CodeCache::blobs_do(code_roots);
  if (UseSharedSpaces && !DumpSharedSpaces) {
    SkipAdjustingSharedStrings skip_closure(root_closure);
    StringTable::oops_do(&skip_closure);
  } else {
    StringTable::oops_do(root_closure);
  }
}

void SharedHeap::set_barrier_set(BarrierSet* bs) {
  _barrier_set = bs;
  // Cached barrier set for fast access in oops
  oopDesc::set_bs(bs);
}

void SharedHeap::post_initialize() {
  ref_processing_init();
}

void SharedHeap::ref_processing_init() {
  perm_gen()->ref_processor_init();
}

// Some utilities.
void SharedHeap::print_size_transition(outputStream* out,
                                       size_t bytes_before,
                                       size_t bytes_after,
                                       size_t capacity) {
  out->print(" " SIZE_FORMAT "%s->" SIZE_FORMAT "%s(" SIZE_FORMAT "%s)",
             byte_size_in_proper_unit(bytes_before),
             proper_unit_for_byte_size(bytes_before),
             byte_size_in_proper_unit(bytes_after),
             proper_unit_for_byte_size(bytes_after),
             byte_size_in_proper_unit(capacity),
             proper_unit_for_byte_size(capacity));
}