
src/share/vm/memory/sharedHeap.cpp

rev 7854 : imported patch 8027962-per-phase-timing-measurements-for-strong-roots-processing


  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/stringTable.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc_interface/collectedHeap.inline.hpp"
  30 #include "memory/sharedHeap.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/atomic.inline.hpp"
  33 #include "runtime/fprofiler.hpp"
  34 #include "runtime/java.hpp"
  35 #include "services/management.hpp"
  36 #include "utilities/copy.hpp"
  37 #include "utilities/workgroup.hpp"
  38 
  39 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  40 
  41 SharedHeap* SharedHeap::_sh;
  42 
  43 // The set of potentially parallel tasks in root scanning.
  44 enum SH_process_roots_tasks {
  45   SH_PS_Universe_oops_do,
  46   SH_PS_JNIHandles_oops_do,
  47   SH_PS_ObjectSynchronizer_oops_do,
  48   SH_PS_FlatProfiler_oops_do,
  49   SH_PS_Management_oops_do,
  50   SH_PS_SystemDictionary_oops_do,
  51   SH_PS_ClassLoaderDataGraph_oops_do,
  52   SH_PS_jvmti_oops_do,
  53   SH_PS_CodeCache_oops_do,
  54   // Leave this one last.
  55   SH_PS_NumElements
  56 };
  57 
  58 SharedHeap::SharedHeap(CollectorPolicy* policy_) :
  59   CollectedHeap(),
  60   _collector_policy(policy_),
  61   _strong_roots_scope(NULL),
  62   _strong_roots_parity(0),
  63   _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
  64   _workers(NULL)
  65 {
  66   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
  67     vm_exit_during_initialization("Failed necessary allocation.");
  68   }
  69   _sh = this;  // _sh is static, should be set only once.
  70   if (UseConcMarkSweepGC || UseG1GC) {
  71     _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
  72                             /* are_GC_task_threads */true,
  73                             /* are_ConcurrentGC_threads */false);
  74     if (_workers == NULL) {
  75       vm_exit_during_initialization("Failed necessary allocation.");
  76     } else {
  77       _workers->initialize_workers();
 168 
 169 void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
 170   assert(UseG1GC,                          "Currently only used by G1");
 171   assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
 172 
 173   // No need to use the barrier if this is single-threaded code.
 174   if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
 175     MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
 176     while ((uint)_n_workers_done_with_threads != n_workers) {
 177       _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
 178     }
 179   }
 180 }
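
The scope above is a simple monitor-based barrier: each worker reports, via mark_worker_done_with_threads() (defined elsewhere in this file), that it will find no more strong CLDs/nmethods in the thread stacks, and the waiter blocks until the count reaches n_workers. A minimal standalone sketch of the same pattern, with std::mutex and std::condition_variable standing in for MonitorLockerEx (all names below are illustrative, not HotSpot APIs):

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

// Illustrative stand-in for the done-with-threads barrier.
class DoneWithThreadsBarrier {
  std::mutex              _lock;
  std::condition_variable _cond;
  unsigned                _done = 0;
public:
  // Called by each worker once it is done scanning thread stacks.
  void mark_done() {
    std::lock_guard<std::mutex> guard(_lock);
    ++_done;
    _cond.notify_all();   // wake the waiter so it can re-check the count
  }
  // Called by the coordinator; returns once all n_workers have signaled.
  void wait_until_done(unsigned n_workers) {
    std::unique_lock<std::mutex> guard(_lock);
    _cond.wait(guard, [&] { return _done == n_workers; });
  }
};

int main() {
  const unsigned n_workers = 4;
  DoneWithThreadsBarrier barrier;
  std::vector<std::thread> workers;
  for (unsigned i = 0; i < n_workers; i++) {
    workers.emplace_back([&] { barrier.mark_done(); });
  }
  barrier.wait_until_done(n_workers);   // blocks until all four signal
  for (std::thread& t : workers) t.join();
  std::printf("all %u workers done with threads\n", n_workers);
  return 0;
}

As in the HotSpot version, the waiter re-checks the count under the lock, so a notification that fires before the wait starts cannot be lost.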
 181 
 182 void SharedHeap::process_roots(bool activate_scope,
 183                                ScanningOption so,
 184                                OopClosure* strong_roots,
 185                                OopClosure* weak_roots,
 186                                CLDClosure* strong_cld_closure,
 187                                CLDClosure* weak_cld_closure,
 188                                CodeBlobClosure* code_roots) {
 189   StrongRootsScope srs(this, activate_scope);
 190 
 191   // General roots.
 192   assert(_strong_roots_parity != 0, "must have called prologue code");
 193   assert(code_roots != NULL, "code root closure should always be set");
  194   // _n_termination for _process_strong_tasks should be set upstream
 195   // in a method not running in a GC worker.  Otherwise the GC worker
 196   // could be trying to change the termination condition while the task
 197   // is executing in another GC worker.
 198 
  199   // Iterating over the CLDG and the Threads is done early to allow G1 to
  200   // first process the strong CLDs and nmethods and then, after a barrier,
  201   // let the threads process the weak CLDs and nmethods.
 202 
 203   if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
 204     ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
 205   }
 206 
 207   // Some CLDs contained in the thread frames should be considered strong.
 208   // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
 209   CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
 210   // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
 211   CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
 212 
 213   Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
 214 
 215   // This is the point where this worker thread will not find more strong CLDs/nmethods.
 216   // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
 217   active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());
 218 
 219   if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
 220     Universe::oops_do(strong_roots);
 221   }
 222   // Global (strong) JNI handles
 223   if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
 224     JNIHandles::oops_do(strong_roots);
 225 
  226   if (!_process_strong_tasks->is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
 227     ObjectSynchronizer::oops_do(strong_roots);
 228   if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
 229     FlatProfiler::oops_do(strong_roots);
 230   if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
 231     Management::oops_do(strong_roots);
 232   if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
 233     JvmtiExport::oops_do(strong_roots);
 234 
 235   if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
 236     SystemDictionary::roots_oops_do(strong_roots, weak_roots);
 237   }
 238 
 239   // All threads execute the following. A specific chunk of buckets
 240   // from the StringTable are the individual tasks.
 241   if (weak_roots != NULL) {
 242     if (CollectedHeap::use_parallel_gc_threads()) {
 243       StringTable::possibly_parallel_oops_do(weak_roots);
 244     } else {
 245       StringTable::oops_do(weak_roots);
 246     }
 247   }
 248 
 249   if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
 250     if (so & SO_ScavengeCodeCache) {
 251       assert(code_roots != NULL, "must supply closure for code cache");
 252 
 253       // We only visit parts of the CodeCache when scavenging.
 254       CodeCache::scavenge_root_nmethods_do(code_roots);
 255     }
 256     if (so & SO_AllCodeCache) {
 257       assert(code_roots != NULL, "must supply closure for code cache");
 258 
 259       // CMSCollector uses this to do intermediate-strength collections.
 260       // We scan the entire code cache, since CodeCache::do_unloading is not called.
 261       CodeCache::blobs_do(code_roots);
 262     }
 263     // Verify that the code cache contents are not subject to
 264     // movement by a scavenging collection.
 265     DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
 266     DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
 267   }
 268 
 269   _process_strong_tasks->all_tasks_completed();
 270 }
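
process_roots() serializes its one-shot subtasks through SubTasksDone: when several GC workers execute this method concurrently, is_task_claimed() lets exactly one worker win each task, and all_tasks_completed() retires the bookkeeping once the last worker has passed through. A minimal standalone sketch of the claiming idiom, built on std::atomic::exchange (illustrative names, not the HotSpot implementation):

#include <atomic>
#include <cstdio>

enum { NumTasks = 3 };

// Illustrative stand-in for SubTasksDone's claiming protocol.
class TaskClaims {
  std::atomic<bool> _claimed[NumTasks];
public:
  TaskClaims() {
    for (std::atomic<bool>& c : _claimed) c.store(false);
  }
  // Returns true if another worker already claimed task t; the single
  // caller that flips the flag from false to true must execute it.
  bool is_task_claimed(int t) {
    return _claimed[t].exchange(true);
  }
};

int main() {
  TaskClaims tasks;
  // The first claimant executes the task...
  if (!tasks.is_task_claimed(0)) std::printf("worker A runs task 0\n");
  // ...any later claimant skips it (this line prints nothing).
  if (!tasks.is_task_claimed(0)) std::printf("worker B runs task 0\n");
  return 0;
}

The exchange makes claim-and-test a single atomic step, so two workers can never both observe the task as unclaimed.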
 271 
 272 void SharedHeap::process_all_roots(bool activate_scope,
 273                                    ScanningOption so,
 274                                    OopClosure* roots,
 275                                    CLDClosure* cld_closure,
 276                                    CodeBlobClosure* code_closure) {
 277   process_roots(activate_scope, so,
 278                 roots, roots,
 279                 cld_closure, cld_closure,
 280                 code_closure);
 281 }
 282 
 283 void SharedHeap::process_strong_roots(bool activate_scope,
 284                                       ScanningOption so,
 285                                       OopClosure* roots,
 286                                       CLDClosure* cld_closure,
 287                                       CodeBlobClosure* code_closure) {
 288   process_roots(activate_scope, so,
 289                 roots, NULL,
 290                 cld_closure, NULL,
 291                 code_closure);
 292 }
 293 
 294 
 295 class AlwaysTrueClosure: public BoolObjectClosure {
 296 public:
 297   bool do_object_b(oop p) { return true; }
 298 };
 299 static AlwaysTrueClosure always_true;
 300 
 301 void SharedHeap::process_weak_roots(OopClosure* root_closure) {
 302   // Global (weak) JNI handles
 303   JNIHandles::weak_oops_do(&always_true, root_closure);
 304 }
 305 
 306 void SharedHeap::set_barrier_set(BarrierSet* bs) {
 307   _barrier_set = bs;
 308   // Cached barrier set for fast access in oops
 309   oopDesc::set_bs(bs);
 310 }
 311 


  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/stringTable.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc_interface/collectedHeap.inline.hpp"
  30 #include "memory/sharedHeap.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/atomic.inline.hpp"
  33 #include "runtime/fprofiler.hpp"
  34 #include "runtime/java.hpp"
  35 #include "services/management.hpp"
  36 #include "utilities/copy.hpp"
  37 #include "utilities/workgroup.hpp"
  38 
  39 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  40 
  41 static const char* ext_roots_task_strings[SharedHeap::SH_PS_NumElements] = {
  42       "Thread Roots (ms)",
  43       "StringTable Roots (ms)",
  44       "Universe Roots (ms)",
  45       "JNI Handles Roots (ms)",
  46       "ObjectSynchronizer Roots (ms)",
  47       "FlatProfiler Roots (ms)",
  48       "Management Roots (ms)",
  49       "SystemDictionary Roots (ms)",
  50       "CLDG Roots (ms)",
  51       "JVMTI Roots (ms)",
  52       "CodeCache Roots (ms)"
  53 };
  54 
  55 const char* SharedHeap::ext_roots_task_str(uint task) {
  56   vmassert(task < ARRAY_SIZE(ext_roots_task_strings), "must be");
  57   return ext_roots_task_strings[task];
  58 }
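
The new code brackets each root-processing subtask with a TrackPhaseTime object whose lifetime spans the phase. Its definition is not part of this file, but the usage implies an RAII scope timer that charges elapsed time to the GCPhaseTimeTracker slot named by the task id and tolerates a NULL tracker when timing is disabled. A minimal standalone sketch of such a timer, using std::chrono (the class and member names below are assumptions for illustration, not the actual GCPhaseTimeTracker API):

#include <chrono>
#include <cstdio>

// Illustrative phase-duration store: one slot per root-processing phase.
class PhaseTimes {
  double _ms[16] = {};
public:
  void record(unsigned phase, double ms) { _ms[phase] += ms; }
  double ms(unsigned phase) const { return _ms[phase]; }
};

// Illustrative RAII tracker: times the enclosing scope and charges it
// to a single phase when the scope is exited.
class TrackPhaseTimeSketch {
  PhaseTimes* _times;   // may be null when timing is disabled
  unsigned    _phase;
  std::chrono::steady_clock::time_point _start;
public:
  TrackPhaseTimeSketch(PhaseTimes* times, unsigned phase)
    : _times(times), _phase(phase),
      _start(std::chrono::steady_clock::now()) {}
  ~TrackPhaseTimeSketch() {
    if (_times != nullptr) {
      std::chrono::duration<double, std::milli> d =
          std::chrono::steady_clock::now() - _start;
      _times->record(_phase, d.count());
    }
  }
};

int main() {
  PhaseTimes times;
  {
    TrackPhaseTimeSketch t(&times, 0);   // times this block as phase 0
    volatile long sink = 0;
    for (long i = 0; i < 1000000; i++) sink += i;
  }
  std::printf("phase 0: %.3f ms\n", times.ms(0));
  return 0;
}

Because the destructor does the recording, every exit path from the scope is charged to the right phase automatically.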
  59 
  60 SharedHeap* SharedHeap::_sh;
  61 
  62 SharedHeap::SharedHeap(CollectorPolicy* policy_) :
  63   CollectedHeap(),
  64   _collector_policy(policy_),
  65   _strong_roots_scope(NULL),
  66   _strong_roots_parity(0),
  67   _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
  68   _workers(NULL)
  69 {
  70   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
  71     vm_exit_during_initialization("Failed necessary allocation.");
  72   }
  73   _sh = this;  // _sh is static, should be set only once.
  74   if (UseConcMarkSweepGC || UseG1GC) {
  75     _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
  76                             /* are_GC_task_threads */true,
  77                             /* are_ConcurrentGC_threads */false);
  78     if (_workers == NULL) {
  79       vm_exit_during_initialization("Failed necessary allocation.");
  80     } else {
  81       _workers->initialize_workers();
 172 
 173 void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
 174   assert(UseG1GC,                          "Currently only used by G1");
 175   assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
 176 
 177   // No need to use the barrier if this is single-threaded code.
 178   if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
 179     MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
 180     while ((uint)_n_workers_done_with_threads != n_workers) {
 181       _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
 182     }
 183   }
 184 }
 185 
 186 void SharedHeap::process_roots(bool activate_scope,
 187                                ScanningOption so,
 188                                OopClosure* strong_roots,
 189                                OopClosure* weak_roots,
 190                                CLDClosure* strong_cld_closure,
 191                                CLDClosure* weak_cld_closure,
 192                                CodeBlobClosure* code_roots,
 193                                GCPhaseTimeTracker* phase_durations) {
 194   StrongRootsScope srs(this, activate_scope);
 195 
 196   // General roots.
 197   assert(_strong_roots_parity != 0, "must have called prologue code");
 198   assert(code_roots != NULL, "code root closure should always be set");
  199   // _n_termination for _process_strong_tasks should be set upstream
 200   // in a method not running in a GC worker.  Otherwise the GC worker
 201   // could be trying to change the termination condition while the task
 202   // is executing in another GC worker.
 203 
  204   // Iterating over the CLDG and the Threads is done early to allow G1 to
  205   // first process the strong CLDs and nmethods and then, after a barrier,
  206   // let the threads process the weak CLDs and nmethods.
 207 
 208   {
 209     TrackPhaseTime x(phase_durations, SH_PS_ClassLoaderDataGraph_oops_do);
 210     if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
 211       ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
 212     }
 213   }
 214 
 215   // Some CLDs contained in the thread frames should be considered strong.
 216   // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
 217   CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
 218   // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
 219   CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
 220 
 221   {
 222     TrackPhaseTime x(phase_durations, SH_PS_Threads_oops_do);
 223     Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
 224   }
 225 
 226   // This is the point where this worker thread will not find more strong CLDs/nmethods.
 227   // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
 228   active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());
 229 
 230   {
 231     TrackPhaseTime x(phase_durations, SH_PS_Universe_oops_do);
 232     if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
 233       Universe::oops_do(strong_roots);
 234     }
 235   }
 236   {
 237     TrackPhaseTime x(phase_durations, SH_PS_JNIHandles_oops_do);
 238     // Global (strong) JNI handles
 239     if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
 240       JNIHandles::oops_do(strong_roots);
 241   }
 242 
 243   {
 244     TrackPhaseTime x(phase_durations, SH_PS_ObjectSynchronizer_oops_do);
  245     if (!_process_strong_tasks->is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
 246       ObjectSynchronizer::oops_do(strong_roots);
 247   }
 248   {
 249     TrackPhaseTime x(phase_durations, SH_PS_FlatProfiler_oops_do);
 250     if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
 251       FlatProfiler::oops_do(strong_roots);
 252   }
 253   {
 254     TrackPhaseTime x(phase_durations, SH_PS_Management_oops_do);
 255     if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
 256       Management::oops_do(strong_roots);
 257   }
 258   {
 259     TrackPhaseTime x(phase_durations, SH_PS_jvmti_oops_do);
 260     if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
 261       JvmtiExport::oops_do(strong_roots);
 262   }
 263   {
 264     TrackPhaseTime x(phase_durations, SH_PS_SystemDictionary_oops_do);
 265     if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
 266       SystemDictionary::roots_oops_do(strong_roots, weak_roots);
 267     }
 268   }
 269 
 270   {
  271     TrackPhaseTime x(phase_durations, SH_PS_StringTable_oops_do);
 272     // All threads execute the following. A specific chunk of buckets
 273     // from the StringTable are the individual tasks.
 274     if (weak_roots != NULL) {
 275       if (CollectedHeap::use_parallel_gc_threads()) {
 276         StringTable::possibly_parallel_oops_do(weak_roots);
 277       } else {
 278         StringTable::oops_do(weak_roots);
 279       }
 280     }
 281   }
 282 
 283   {
 284     TrackPhaseTime x(phase_durations, SH_PS_CodeCache_oops_do);
 285     if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
 286       if (so & SO_ScavengeCodeCache) {
 287         assert(code_roots != NULL, "must supply closure for code cache");
 288 
 289         // We only visit parts of the CodeCache when scavenging.
 290         CodeCache::scavenge_root_nmethods_do(code_roots);
 291       }
 292       if (so & SO_AllCodeCache) {
 293         assert(code_roots != NULL, "must supply closure for code cache");
 294 
 295         // CMSCollector uses this to do intermediate-strength collections.
 296         // We scan the entire code cache, since CodeCache::do_unloading is not called.
 297         CodeCache::blobs_do(code_roots);
 298       }
 299       // Verify that the code cache contents are not subject to
 300       // movement by a scavenging collection.
 301       DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
 302       DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
 303     }
 304   }
 305 
 306   _process_strong_tasks->all_tasks_completed();
 307 }
 308 
 309 void SharedHeap::process_all_roots(bool activate_scope,
 310                                    ScanningOption so,
 311                                    OopClosure* roots,
 312                                    CLDClosure* cld_closure,
 313                                    CodeBlobClosure* code_closure,
 314                                    GCPhaseTimeTracker* phase_durations) {
 315   process_roots(activate_scope, so,
 316                 roots, roots,
 317                 cld_closure, cld_closure,
 318                 code_closure,
 319                 phase_durations);
 320 }
 321 
 322 void SharedHeap::process_strong_roots(bool activate_scope,
 323                                       ScanningOption so,
 324                                       OopClosure* roots,
 325                                       CLDClosure* cld_closure,
 326                                       CodeBlobClosure* code_closure,
 327                                       GCPhaseTimeTracker* phase_durations) {
 328   process_roots(activate_scope, so,
 329                 roots, NULL,
 330                 cld_closure, NULL,
 331                 code_closure,
 332                 phase_durations);
 333 }
 334 
 335 
 336 class AlwaysTrueClosure: public BoolObjectClosure {
 337 public:
 338   bool do_object_b(oop p) { return true; }
 339 };
 340 static AlwaysTrueClosure always_true;
 341 
 342 void SharedHeap::process_weak_roots(OopClosure* root_closure) {
 343   // Global (weak) JNI handles
 344   JNIHandles::weak_oops_do(&always_true, root_closure);
 345 }
 346 
 347 void SharedHeap::set_barrier_set(BarrierSet* bs) {
 348   _barrier_set = bs;
 349   // Cached barrier set for fast access in oops
 350   oopDesc::set_bs(bs);
 351 }
 352 