src/share/vm/memory/sharedHeap.cpp

old/src/share/vm/memory/sharedHeap.cpp:
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/stringTable.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc_interface/collectedHeap.inline.hpp"
  30 #include "memory/sharedHeap.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/fprofiler.hpp"
  33 #include "runtime/java.hpp"
  34 #include "services/management.hpp"
  35 #include "utilities/copy.hpp"
  36 #include "utilities/workgroup.hpp"
  37 
  38 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  39 
  40 SharedHeap* SharedHeap::_sh;
  41 
  42 // The set of potentially parallel tasks in strong root scanning.
  43 enum SH_process_strong_roots_tasks {
  44   SH_PS_Universe_oops_do,
  45   SH_PS_JNIHandles_oops_do,
  46   SH_PS_ObjectSynchronizer_oops_do,
  47   SH_PS_FlatProfiler_oops_do,
  48   SH_PS_Management_oops_do,
  49   SH_PS_SystemDictionary_oops_do,
  50   SH_PS_ClassLoaderDataGraph_oops_do,
  51   SH_PS_jvmti_oops_do,
  52   SH_PS_CodeCache_oops_do,
  53   // Leave this one last.
  54   SH_PS_NumElements
  55 };
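
Each enumerator above names one subtask that exactly one GC worker should execute. The SubTasksDone object created in the constructor below arbitrates this: workers race to claim each subtask, and is_task_claimed() returns false for exactly one of them. A minimal standalone sketch of that claiming protocol (modern C++ with std::atomic; this models the idea and is not the HotSpot SubTasksDone implementation):

    #include <atomic>

    // One atomic flag per subtask; the first thread to flip a flag
    // from 0 to 1 owns that subtask.
    class SubTasksDoneModel {
      std::atomic<int> _tasks[SH_PS_NumElements];
    public:
      SubTasksDoneModel() {
        for (int i = 0; i < SH_PS_NumElements; i++) _tasks[i].store(0);
      }
      // Returns true if some other thread already claimed subtask t.
      bool is_task_claimed(int t) {
        return _tasks[t].exchange(1) != 0;
      }
    };

    // Worker pattern, as used throughout the root-processing code below:
    //   if (!tasks.is_task_claimed(SH_PS_Universe_oops_do)) {
    //     ...scan Universe roots; no other worker will repeat this...
    //   }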
  56 
  57 SharedHeap::SharedHeap(CollectorPolicy* policy_) :
  58   CollectedHeap(),
  59   _collector_policy(policy_),
  60   _rem_set(NULL),
  61   _strong_roots_parity(0),
  62   _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
  63   _workers(NULL)
  64 {
  65   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
  66     vm_exit_during_initialization("Failed necessary allocation.");
  67   }
  68   _sh = this;  // _sh is static, should be set only once.
  69   if ((UseParNewGC ||
  70       (UseConcMarkSweepGC && (CMSParallelInitialMarkEnabled ||
  71                               CMSParallelRemarkEnabled)) ||
  72        UseG1GC) &&
  73       ParallelGCThreads > 0) {
  74     _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
  75                             /* are_GC_task_threads */true,
  76                             /* are_ConcurrentGC_threads */false);
  77     if (_workers == NULL) {
  78       vm_exit_during_initialization("Failed necessary allocation.");
  79     } else {
  80       _workers->initialize_workers();
  97              && _thread_holds_heap_lock_for_gc);
  98 }
  99 
 100 void SharedHeap::set_par_threads(uint t) {
 101   assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
 102   _n_par_threads = t;
 103   _process_strong_tasks->set_n_threads(t);
 104 }
 105 
 106 #ifdef ASSERT
 107 class AssertNonScavengableClosure: public OopClosure {
 108 public:
 109   virtual void do_oop(oop* p) {
 110     assert(!Universe::heap()->is_in_partial_collection(*p),
 111       "Referent should not be scavengable.");  }
 112   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 113 };
 114 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
 115 #endif
 116 
 117 void SharedHeap::change_strong_roots_parity() {
 118   // Set the new collection parity.
 119   assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
 120          "Not in range.");
 121   _strong_roots_parity++;
 122   if (_strong_roots_parity == 3) _strong_roots_parity = 1;
 123   assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2,
 124          "Not in range.");
 125 }
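
The parity deliberately cycles 1, 2, 1, 2, ... (never returning to 0), so a stale value recorded during the previous collection can never equal the current one. Per-thread state is then compared against the current parity to decide whether a thread's stack still needs to be claimed in this round. A minimal sketch of that claim step (the field and method names here are invented for illustration; HotSpot's actual claiming lives in the Thread/Threads code):

    #include <atomic>

    struct ThreadClaimModel {
      std::atomic<int> _last_parity{0};   // 0 = never yet scanned

      // Returns true for exactly one caller per parity change; that
      // caller is responsible for scanning this thread's roots.
      bool claim(int current_parity) {
        int seen = _last_parity.load();
        return seen != current_parity &&
               _last_parity.compare_exchange_strong(seen, current_parity);
      }
    };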
 126 
 127 SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
 128   : MarkScope(activate)
 129 {
 130   if (_active) {
 131     outer->change_strong_roots_parity();
 132     // Zero the claimed high water mark in the StringTable
 133     StringTable::clear_parallel_claimed_index();
 134   }
 135 }
 136 
 137 SharedHeap::StrongRootsScope::~StrongRootsScope() {
 138   // nothing particular
 139 }
 140 
 141 void SharedHeap::process_strong_roots(bool activate_scope,
 142                                       ScanningOption so,
 143                                       OopClosure* roots,
 144                                       KlassClosure* klass_closure) {
 145   StrongRootsScope srs(this, activate_scope);
 146 
 147   // General strong roots.
 148   assert(_strong_roots_parity != 0, "must have called prologue code");
 149   // _n_termination for _process_strong_tasks should be set upstream,
 150   // in a method not running in a GC worker.  Otherwise the GC worker
 151   // could be trying to change the termination condition while the task
 152   // is executing in another GC worker.
 153   if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
 154     Universe::oops_do(roots);
 155   }
 156   // Global (strong) JNI handles
 157   if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
 158     JNIHandles::oops_do(roots);
 159 
 160   CodeBlobToOopClosure code_roots(roots, true);
 161 
 162   CLDToOopClosure roots_from_clds(roots);
 163   // If we limit class scanning to SO_SystemClasses we need to apply a CLD closure to
 164   // CLDs which are strongly reachable from the thread stacks.
 165   CLDToOopClosure* roots_from_clds_p = ((so & SO_SystemClasses) ? &roots_from_clds : NULL);
 166   // All GC worker threads execute this; the individual Java threads are the parallel tasks.
 167   if (CollectedHeap::use_parallel_gc_threads()) {
 168     Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, &code_roots);
 169   } else {
 170     Threads::oops_do(roots, roots_from_clds_p, &code_roots);
 171   }
 172 
 173   if (!_process_strong_tasks->is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
 174     ObjectSynchronizer::oops_do(roots);
 175   if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
 176     FlatProfiler::oops_do(roots);
 177   if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
 178     Management::oops_do(roots);
 179   if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
 180     JvmtiExport::oops_do(roots);
 181 
 182   if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
 183     if (so & SO_AllClasses) {
 184       SystemDictionary::oops_do(roots);
 185     } else if (so & SO_SystemClasses) {
 186       SystemDictionary::always_strong_oops_do(roots);
 187     } else {
 188       fatal("We should always have selected either SO_AllClasses or SO_SystemClasses");
 189     }
 190   }
 191 
 192   if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
 193     if (so & SO_AllClasses) {
 194       ClassLoaderDataGraph::oops_do(roots, klass_closure, /* must_claim */ false);
 195     } else if (so & SO_SystemClasses) {
 196       ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, /* must_claim */ true);
 197     }
 198   }
 199 
 200   // All threads execute the following. Specific chunks of buckets
 201   // from the StringTable are the individual tasks.
 202   if (so & SO_Strings) {
 203     if (CollectedHeap::use_parallel_gc_threads()) {
 204       StringTable::possibly_parallel_oops_do(roots);
 205     } else {
 206       StringTable::oops_do(roots);
 207     }
 208   }
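
StringTable::possibly_parallel_oops_do partitions the table's buckets into chunks that workers claim through an atomic index, the "claimed high water mark" that the StrongRootsScope constructor resets via clear_parallel_claimed_index(). A standalone sketch of such chunked claiming (the bucket count and chunk size are made up for the example):

    #include <atomic>

    static const int TABLE_SIZE = 1009;        // assumed bucket count
    static const int CHUNK      = 32;          // buckets claimed per grab
    static std::atomic<int> claimed_index(0);  // reset before each scan

    // Each worker grabs the next unclaimed chunk until the table is covered.
    void parallel_scan(void (*scan_bucket)(int)) {
      int start;
      while ((start = claimed_index.fetch_add(CHUNK)) < TABLE_SIZE) {
        int end = (start + CHUNK < TABLE_SIZE) ? start + CHUNK : TABLE_SIZE;
        for (int i = start; i < end; i++) {
          scan_bucket(i);   // apply the oop closure to every entry in bucket i
        }
      }
    }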
 209 
 210   if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
 211     if (so & SO_ScavengeCodeCache) {
 212       assert(&code_roots != NULL, "must supply closure for code cache");
 213 
 214       // We only visit parts of the CodeCache when scavenging.
 215       CodeCache::scavenge_root_nmethods_do(&code_roots);
 216     }
 217     if (so & SO_AllCodeCache) {
 218       assert(&code_roots != NULL, "must supply closure for code cache");
 219 
 220       // CMSCollector uses this to do intermediate-strength collections.
 221       // We scan the entire code cache, since CodeCache::do_unloading is not called.
 222       CodeCache::blobs_do(&code_roots);
 223     }
 224     // Verify that the code cache contents are not subject to
 225     // movement by a scavenging collection.
 226     DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
 227     DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
 228   }
 229 
 230   _process_strong_tasks->all_tasks_completed();
 231 }
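
For callers, so is a bit set: ScanningOption flags are OR-ed together to select which class, string, and code-cache roots the pass visits. A hypothetical call site (the heap pointer and closure objects are invented for illustration):

    // Scan all classes, interned strings, and scavengable nmethods:
    ScanningOption so = ScanningOption(SO_AllClasses | SO_Strings | SO_ScavengeCodeCache);
    heap->process_strong_roots(true /* activate_scope */, so,
                               &my_root_closure, &my_klass_closure);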
 232 
 233 class AlwaysTrueClosure: public BoolObjectClosure {
 234 public:
 235   bool do_object_b(oop p) { return true; }
 236 };
 237 static AlwaysTrueClosure always_true;
 238 
 239 void SharedHeap::process_weak_roots(OopClosure* root_closure) {
 240   // Global (weak) JNI handles
 241   JNIHandles::weak_oops_do(&always_true, root_closure);
 242 }
 243 
 244 void SharedHeap::set_barrier_set(BarrierSet* bs) {
 245   _barrier_set = bs;
 246   // Cached barrier set for fast access in oops
 247   oopDesc::set_bs(bs);
 248 }
 249 
 250 void SharedHeap::post_initialize() {
 251   CollectedHeap::post_initialize();


new/src/share/vm/memory/sharedHeap.cpp:

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/stringTable.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc_interface/collectedHeap.inline.hpp"
  30 #include "memory/sharedHeap.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/atomic.inline.hpp"
  33 #include "runtime/fprofiler.hpp"
  34 #include "runtime/java.hpp"
  35 #include "services/management.hpp"
  36 #include "utilities/copy.hpp"
  37 #include "utilities/workgroup.hpp"
  38 
  39 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  40 
  41 SharedHeap* SharedHeap::_sh;
  42 
  43 // The set of potentially parallel tasks in root scanning.
  44 enum SH_process_roots_tasks {
  45   SH_PS_Universe_oops_do,
  46   SH_PS_JNIHandles_oops_do,
  47   SH_PS_ObjectSynchronizer_oops_do,
  48   SH_PS_FlatProfiler_oops_do,
  49   SH_PS_Management_oops_do,
  50   SH_PS_SystemDictionary_oops_do,
  51   SH_PS_ClassLoaderDataGraph_oops_do,
  52   SH_PS_jvmti_oops_do,
  53   SH_PS_CodeCache_oops_do,
  54   // Leave this one last.
  55   SH_PS_NumElements
  56 };
  57 
  58 SharedHeap::SharedHeap(CollectorPolicy* policy_) :
  59   CollectedHeap(),
  60   _collector_policy(policy_),
  61   _rem_set(NULL),
  62   _strong_roots_scope(NULL),
  63   _strong_roots_parity(0),
  64   _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
  65   _workers(NULL)
  66 {
  67   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
  68     vm_exit_during_initialization("Failed necessary allocation.");
  69   }
  70   _sh = this;  // _sh is static, should be set only once.
  71   if ((UseParNewGC ||
  72       (UseConcMarkSweepGC && (CMSParallelInitialMarkEnabled ||
  73                               CMSParallelRemarkEnabled)) ||
  74        UseG1GC) &&
  75       ParallelGCThreads > 0) {
  76     _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
  77                             /* are_GC_task_threads */true,
  78                             /* are_ConcurrentGC_threads */false);
  79     if (_workers == NULL) {
  80       vm_exit_during_initialization("Failed necessary allocation.");
  81     } else {
  82       _workers->initialize_workers();
  99              && _thread_holds_heap_lock_for_gc);
 100 }
 101 
 102 void SharedHeap::set_par_threads(uint t) {
 103   assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
 104   _n_par_threads = t;
 105   _process_strong_tasks->set_n_threads(t);
 106 }
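
The practical consequence of this setter, together with the comment in process_roots below about configuring the termination count upstream, is an ordering requirement: the VM thread sets the parallelism before dispatching workers, never from inside a worker. A hedged sketch of the expected calling order (the gang task object and variable names are invented):

    // On the VM thread, before dispatching parallel root scanning:
    sh->set_par_threads(n_workers);     // fixes the SubTasksDone termination count
    workers->run_task(&root_scan_task); // workers now claim and run subtasks
    sh->set_par_threads(0);             // restore single-threaded mode afterwards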
 107 
 108 #ifdef ASSERT
 109 class AssertNonScavengableClosure: public OopClosure {
 110 public:
 111   virtual void do_oop(oop* p) {
 112     assert(!Universe::heap()->is_in_partial_collection(*p),
 113       "Referent should not be scavengable.");  }
 114   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 115 };
 116 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
 117 #endif
 118 
 119 SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const {
 120   return _strong_roots_scope;
 121 }
 122 void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
 123   assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active");
 124   assert(scope != NULL, "Illegal argument");
 125   _strong_roots_scope = scope;
 126 }
 127 void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
 128   assert(_strong_roots_scope == scope, "Wrong scope unregistered");
 129   _strong_roots_scope = NULL;
 130 }
 131 
 132 void SharedHeap::change_strong_roots_parity() {
 133   // Set the new collection parity.
 134   assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
 135          "Not in range.");
 136   _strong_roots_parity++;
 137   if (_strong_roots_parity == 3) _strong_roots_parity = 1;
 138   assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2,
 139          "Not in range.");
 140 }
 141 
 142 SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
 143   : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
 144 {
 145   if (_active) {
 146     _sh->register_strong_roots_scope(this);
 147     _sh->change_strong_roots_parity();
 148     // Zero the claimed high water mark in the StringTable
 149     StringTable::clear_parallel_claimed_index();
 150   }
 151 }
 152 
 153 SharedHeap::StrongRootsScope::~StrongRootsScope() {
 154   if (_active) {
 155     _sh->unregister_strong_roots_scope(this);
 156   }
 157 }
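
StrongRootsScope is an RAII guard: constructing it with activate == true advances the parity, resets the StringTable claim index, and registers the scope; the destructor unregisters it. process_roots() below creates one itself, and a collector can equally wrap a larger phase; roughly:

    {
      SharedHeap::StrongRootsScope srs(heap, true /* activate */);
      // All root processing for this collection happens here, with
      // heap->active_strong_roots_scope() == &srs.
    }  // ~StrongRootsScope unregisters the scope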
 158 
 159 Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false);
 160 
 161 void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
 162   // The Thread work barrier is only needed by G1.
 163   // No need to use the barrier if this is single-threaded code.
 164   if (UseG1GC && n_workers > 0) {
 165     uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
 166     if (new_value == n_workers) {
 167       // This thread is last. Notify the others.
 168       MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
 169       _lock->notify_all();
 170     }
 171   }
 172 }
 173 
 174 void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
 175   // No need to use the barrier if this is single-threaded code.
 176   if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
 177     MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
 178     while ((uint)_n_workers_done_with_threads != n_workers) {
 179       _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
 180     }
 181   }
 182 }
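
Together these two methods form a one-shot countdown barrier: every worker bumps the counter once it has finished scanning thread stacks (and thus can discover no more strong CLDs or nmethods), and any code that must not start weak CLD/nmethod processing earlier blocks until the count reaches n_workers. A sketch of the intended use from the two sides (task wiring omitted):

    // In each of the n_workers GC worker threads, after the thread-stack scan:
    scope->mark_worker_done_with_threads(n_workers);

    // In the code that handles weak CLDs/nmethods:
    scope->wait_until_all_workers_done_with_threads(n_workers);
    // ...now no worker can still produce strong CLDs/nmethods...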
 183 
 184 void SharedHeap::process_roots(bool activate_scope,
 185                                ScanningOption so,
 186                                OopClosure* strong_roots,
 187                                OopClosure* weak_roots,
 188                                CLDClosure* strong_cld_closure,
 189                                CLDClosure* weak_cld_closure,
 190                                CodeBlobClosure* code_roots) {
 191   StrongRootsScope srs(this, activate_scope);
 192 
 193   // General roots.
 194   assert(_strong_roots_parity != 0, "must have called prologue code");
 195   assert(code_roots != NULL, "code root closure should always be set");
 196   // _n_termination for _process_strong_tasks should be set upstream,
 197   // in a method not running in a GC worker.  Otherwise the GC worker
 198   // could be trying to change the termination condition while the task
 199   // is executing in another GC worker.
 200 
 201   // Iterating over the CLDG and the Threads is done early to allow G1 to
 202   // first process the strong CLDs and nmethods and then, after a barrier,
 203   // let the threads process the weak CLDs and nmethods.
 204 
 205   if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
 206     ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
 207   }
 208 
 209   // Some CLDs contained in the thread frames should be considered strong.
 210   // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
 211   CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
 212   // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
 213   CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
 214 
 215   Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
 216 
 217   // Beyond this point, this worker thread will not find any more strong CLDs/nmethods.
 218   // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
 219   active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());
 220 
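
A class-unloading collector makes the split above observable by passing two different CLD closures; a collector that keeps all classes alive can pass the same closure twice, in which case roots_from_clds_p becomes NULL and thread-stack CLDs are skipped here, since the ClassLoaderDataGraph pass already visited every CLD. A hypothetical class-unloading call (closure names invented):

    heap->process_roots(true /* activate_scope */,
                        ScanningOption(0),  // no extra class/string/code flags
                        &strong_oops, &weak_oops,
                        &strong_clds, &weak_clds,  // differ, so thread CLDs are scanned strongly
                        &code_blobs);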
 221   if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
 222     Universe::oops_do(strong_roots);
 223   }
 224   // Global (strong) JNI handles
 225   if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
 226     JNIHandles::oops_do(strong_roots);
 227 
 228   if (!_process_strong_tasks->is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
 229     ObjectSynchronizer::oops_do(strong_roots);
 230   if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
 231     FlatProfiler::oops_do(strong_roots);
 232   if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
 233     Management::oops_do(strong_roots);
 234   if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
 235     JvmtiExport::oops_do(strong_roots);
 236 
 237   if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
 238     SystemDictionary::roots_oops_do(strong_roots, weak_roots);
 239   }
 240 
 241   // All threads execute the following. Specific chunks of buckets
 242   // from the StringTable are the individual tasks.
 243   if (weak_roots != NULL) {
 244     if (CollectedHeap::use_parallel_gc_threads()) {
 245       StringTable::possibly_parallel_oops_do(weak_roots);
 246     } else {
 247       StringTable::oops_do(weak_roots);
 248     }
 249   }
 250 
 251   if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
 252     if (so & SO_ScavengeCodeCache) {
 253       assert(code_roots != NULL, "must supply closure for code cache");
 254 
 255       // We only visit parts of the CodeCache when scavenging.
 256       CodeCache::scavenge_root_nmethods_do(code_roots);
 257     }
 258     if (so & SO_AllCodeCache) {
 259       assert(code_roots != NULL, "must supply closure for code cache");
 260 
 261       // CMSCollector uses this to do intermediate-strength collections.
 262       // We scan the entire code cache, since CodeCache::do_unloading is not called.
 263       CodeCache::blobs_do(code_roots);
 264     }
 265     // Verify that the code cache contents are not subject to
 266     // movement by a scavenging collection.
 267     DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
 268     DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
 269   }
 270 
 271   _process_strong_tasks->all_tasks_completed();
 272 }
 273 
 274 void SharedHeap::process_all_roots(bool activate_scope,
 275                                    ScanningOption so,
 276                                    OopClosure* roots,
 277                                    CLDClosure* cld_closure,
 278                                    CodeBlobClosure* code_closure) {
 279   process_roots(activate_scope, so,
 280                 roots, roots,
 281                 cld_closure, cld_closure,
 282                 code_closure);
 283 }
 284 
 285 void SharedHeap::process_strong_roots(bool activate_scope,
 286                                       ScanningOption so,
 287                                       OopClosure* roots,
 288                                       CLDClosure* cld_closure,
 289                                       CodeBlobClosure* code_closure) {
 290   process_roots(activate_scope, so,
 291                 roots, NULL,
 292                 cld_closure, NULL,
 293                 code_closure);
 294 }
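
The two wrappers encode the strong/weak distinction purely through their arguments: process_all_roots passes the same closures for both roles so everything is visited, while process_strong_roots passes NULL for the weak roles, which skips the StringTable block and the weak halves of the SystemDictionary and CLD passes. Side by side (call sites invented):

    // Full scan: weak and strong roots through the same closures.
    heap->process_all_roots(true, so, &mark_oops, &mark_clds, &mark_code);

    // Strong-only scan: weak_roots and weak_cld_closure are NULL inside.
    heap->process_strong_roots(true, so, &mark_oops, &mark_clds, &mark_code);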
 295 
 296 
 297 class AlwaysTrueClosure: public BoolObjectClosure {
 298 public:
 299   bool do_object_b(oop p) { return true; }
 300 };
 301 static AlwaysTrueClosure always_true;
 302 
 303 void SharedHeap::process_weak_roots(OopClosure* root_closure) {
 304   // Global (weak) JNI handles
 305   JNIHandles::weak_oops_do(&always_true, root_closure);
 306 }
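
JNIHandles::weak_oops_do takes a liveness predicate: always_true keeps every weak global handle alive and merely applies root_closure to it. A collector that wants dead weak handles cleared would substitute its own test, along these lines (the is_marked query is an assumed stand-in for a real mark check):

    // Hypothetical liveness filter: keep only referents reached by marking.
    class IsAliveClosure : public BoolObjectClosure {
    public:
      virtual bool do_object_b(oop p) { return is_marked(p); }
    };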
 307 
 308 void SharedHeap::set_barrier_set(BarrierSet* bs) {
 309   _barrier_set = bs;
 310   // Cached barrier set for fast access in oops
 311   oopDesc::set_bs(bs);
 312 }
 313 
 314 void SharedHeap::post_initialize() {
 315   CollectedHeap::post_initialize();