
src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp

rev 56667 : imported patch 8230706-kim-strong-weak-processing
rev 56669 : imported patch 8228609-used-vs-copied-bytes
rev 56672 : imported patch 8232779-collection-parallel-time-no-optional-evac
rev 56673 : imported patch 8227739-merge-scan-rs-update-rs-cost

Old version:

  55   _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots (ms):");
  56   _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots (ms):");
  57   _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots (ms):");
  58   _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots (ms):");
  59   _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots (ms):");
  60   _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms):");
  61   _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms):");
  62   _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms):");
  63   AOT_ONLY(_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "AOT Root Scan (ms):");)
  64   _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms):");
  65 
  66   _gc_par_phases[MergeER] = new WorkerDataArray<double>(max_gc_threads, "Eager Reclaim (ms):");
  67 
  68   _gc_par_phases[MergeRS] = new WorkerDataArray<double>(max_gc_threads, "Remembered Sets (ms):");
  69   _merge_rs_merged_sparse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Sparse:");
  70   _gc_par_phases[MergeRS]->link_thread_work_items(_merge_rs_merged_sparse, MergeRSMergedSparse);
  71   _merge_rs_merged_fine = new WorkerDataArray<size_t>(max_gc_threads, "Merged Fine:");
  72   _gc_par_phases[MergeRS]->link_thread_work_items(_merge_rs_merged_fine, MergeRSMergedFine);
  73   _merge_rs_merged_coarse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Coarse:");
  74   _gc_par_phases[MergeRS]->link_thread_work_items(_merge_rs_merged_coarse, MergeRSMergedCoarse);


  75 
  76   _gc_par_phases[OptMergeRS] = new WorkerDataArray<double>(max_gc_threads, "Optional Remembered Sets (ms):");
  77   _opt_merge_rs_merged_sparse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Sparse:");
  78   _gc_par_phases[OptMergeRS]->link_thread_work_items(_opt_merge_rs_merged_sparse, MergeRSMergedSparse);
  79   _opt_merge_rs_merged_fine = new WorkerDataArray<size_t>(max_gc_threads, "Merged Fine:");
  80   _gc_par_phases[OptMergeRS]->link_thread_work_items(_opt_merge_rs_merged_fine, MergeRSMergedFine);
  81   _opt_merge_rs_merged_coarse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Coarse:");
  82   _gc_par_phases[OptMergeRS]->link_thread_work_items(_opt_merge_rs_merged_coarse, MergeRSMergedCoarse);


  83 
  84   _gc_par_phases[MergeLB] = new WorkerDataArray<double>(max_gc_threads, "Log Buffers (ms):");
  85   if (G1HotCardCache::default_use_cache()) {
  86     _gc_par_phases[MergeHCC] = new WorkerDataArray<double>(max_gc_threads, "Hot Card Cache (ms):");
  87     _merge_hcc_dirty_cards = new WorkerDataArray<size_t>(max_gc_threads, "Dirty Cards:");
  88     _gc_par_phases[MergeHCC]->link_thread_work_items(_merge_hcc_dirty_cards, MergeHCCDirtyCards);
  89     _merge_hcc_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
  90     _gc_par_phases[MergeHCC]->link_thread_work_items(_merge_hcc_skipped_cards, MergeHCCSkippedCards);
  91   } else {
  92     _gc_par_phases[MergeHCC] = NULL;
  93     _merge_hcc_dirty_cards = NULL;
  94     _merge_hcc_skipped_cards = NULL;
  95   }
  96   _gc_par_phases[ScanHR] = new WorkerDataArray<double>(max_gc_threads, "Scan Heap Roots (ms):");
  97   _gc_par_phases[OptScanHR] = new WorkerDataArray<double>(max_gc_threads, "Optional Scan Heap Roots (ms):");
  98   _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scan (ms):");
  99   _gc_par_phases[OptCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Optional Code Root Scan (ms):");
 100   _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms):");
 101   _gc_par_phases[OptObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Optional Object Copy (ms):");
 102   _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms):");


 293 }
 294 
 295 double G1GCPhaseTimes::get_time_secs(GCParPhases phase, uint worker_id) {
 296   return _gc_par_phases[phase]->get(worker_id);
 297 }
 298 
 299 void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_id, size_t count, uint index) {
 300   _gc_par_phases[phase]->set_thread_work_item(worker_id, count, index);
 301 }
 302 
 303 void G1GCPhaseTimes::record_or_add_thread_work_item(GCParPhases phase, uint worker_id, size_t count, uint index) {
 304   _gc_par_phases[phase]->set_or_add_thread_work_item(worker_id, count, index);
 305 }
 306 
 307 size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_id, uint index) {
 308   return _gc_par_phases[phase]->get_thread_work_item(worker_id, index);
 309 }
 310 
 311 // return the average time for a phase in milliseconds
 312 double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {



 313   return _gc_par_phases[phase]->average() * 1000.0;
 314 }
 315 
 316 size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase, uint index) {



 317   assert(_gc_par_phases[phase]->thread_work_items(index) != NULL, "No sub count");
 318   return _gc_par_phases[phase]->thread_work_items(index)->sum();
 319 }
 320 
 321 template <class T>
 322 void G1GCPhaseTimes::details(T* phase, const char* indent) const {
 323   LogTarget(Trace, gc, phases, task) lt;
 324   if (lt.is_enabled()) {
 325     LogStream ls(lt);
 326     ls.print("%s", indent);
 327     phase->print_details_on(&ls);
 328   }
 329 }
 330 
 331 void G1GCPhaseTimes::log_phase(WorkerDataArray<double>* phase, uint indent, outputStream* out, bool print_sum) const {
 332   out->print("%s", Indents[indent]);
 333   phase->print_summary_on(out, print_sum);
 334   details(phase, Indents[indent]);
 335 
 336   for (uint i = 0; i < phase->MaxThreadWorkItems; i++) {

New version:

  55   _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots (ms):");
  56   _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots (ms):");
  57   _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots (ms):");
  58   _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots (ms):");
  59   _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots (ms):");
  60   _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms):");
  61   _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms):");
  62   _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms):");
  63   AOT_ONLY(_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "AOT Root Scan (ms):");)
  64   _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms):");
  65 
  66   _gc_par_phases[MergeER] = new WorkerDataArray<double>(max_gc_threads, "Eager Reclaim (ms):");
  67 
  68   _gc_par_phases[MergeRS] = new WorkerDataArray<double>(max_gc_threads, "Remembered Sets (ms):");
  69   _merge_rs_merged_sparse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Sparse:");
  70   _gc_par_phases[MergeRS]->link_thread_work_items(_merge_rs_merged_sparse, MergeRSMergedSparse);
  71   _merge_rs_merged_fine = new WorkerDataArray<size_t>(max_gc_threads, "Merged Fine:");
  72   _gc_par_phases[MergeRS]->link_thread_work_items(_merge_rs_merged_fine, MergeRSMergedFine);
  73   _merge_rs_merged_coarse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Coarse:");
  74   _gc_par_phases[MergeRS]->link_thread_work_items(_merge_rs_merged_coarse, MergeRSMergedCoarse);
  75   _merge_rs_dirty_cards = new WorkerDataArray<size_t>(max_gc_threads, "Dirty Cards:");
  76   _gc_par_phases[MergeRS]->link_thread_work_items(_merge_rs_dirty_cards, MergeRSDirtyCards);
  77 
  78   _gc_par_phases[OptMergeRS] = new WorkerDataArray<double>(max_gc_threads, "Optional Remembered Sets (ms):");
  79   _opt_merge_rs_merged_sparse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Sparse:");
  80   _gc_par_phases[OptMergeRS]->link_thread_work_items(_opt_merge_rs_merged_sparse, MergeRSMergedSparse);
  81   _opt_merge_rs_merged_fine = new WorkerDataArray<size_t>(max_gc_threads, "Merged Fine:");
  82   _gc_par_phases[OptMergeRS]->link_thread_work_items(_opt_merge_rs_merged_fine, MergeRSMergedFine);
  83   _opt_merge_rs_merged_coarse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Coarse:");
  84   _gc_par_phases[OptMergeRS]->link_thread_work_items(_opt_merge_rs_merged_coarse, MergeRSMergedCoarse);
  85   _opt_merge_rs_dirty_cards = new WorkerDataArray<size_t>(max_gc_threads, "Dirty Cards:");
  86   _gc_par_phases[OptMergeRS]->link_thread_work_items(_opt_merge_rs_dirty_cards, MergeRSDirtyCards);
  87 
  88   _gc_par_phases[MergeLB] = new WorkerDataArray<double>(max_gc_threads, "Log Buffers (ms):");
  89   if (G1HotCardCache::default_use_cache()) {
  90     _gc_par_phases[MergeHCC] = new WorkerDataArray<double>(max_gc_threads, "Hot Card Cache (ms):");
  91     _merge_hcc_dirty_cards = new WorkerDataArray<size_t>(max_gc_threads, "Dirty Cards:");
  92     _gc_par_phases[MergeHCC]->link_thread_work_items(_merge_hcc_dirty_cards, MergeHCCDirtyCards);
  93     _merge_hcc_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
  94     _gc_par_phases[MergeHCC]->link_thread_work_items(_merge_hcc_skipped_cards, MergeHCCSkippedCards);
  95   } else {
  96     _gc_par_phases[MergeHCC] = NULL;
  97     _merge_hcc_dirty_cards = NULL;
  98     _merge_hcc_skipped_cards = NULL;
  99   }
 100   _gc_par_phases[ScanHR] = new WorkerDataArray<double>(max_gc_threads, "Scan Heap Roots (ms):");
 101   _gc_par_phases[OptScanHR] = new WorkerDataArray<double>(max_gc_threads, "Optional Scan Heap Roots (ms):");
 102   _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scan (ms):");
 103   _gc_par_phases[OptCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Optional Code Root Scan (ms):");
 104   _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms):");
 105   _gc_par_phases[OptObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Optional Object Copy (ms):");
 106   _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms):");
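
The constructor hunk above links a new "Dirty Cards:" work item (MergeRSDirtyCards) into both the MergeRS and OptMergeRS phases, alongside the existing sparse/fine/coarse counters. The fragment below is a minimal sketch, not part of this webrev, of how a GC worker could feed those counters through the record_or_add_thread_work_item() API shown further down; the helper name and call site are hypothetical, only the phase and work-item identifiers come from the code above.

  // Hypothetical illustration only -- not part of this change.
  // Assumes #include "gc/g1/g1GCPhaseTimes.hpp".
  static void record_merge_rs_counts_sketch(G1GCPhaseTimes* pt, uint worker_id,
                                            size_t sparse, size_t fine,
                                            size_t coarse, size_t dirty_cards) {
    // "or_add" because one worker merges many regions and the counts accumulate.
    pt->record_or_add_thread_work_item(G1GCPhaseTimes::MergeRS, worker_id,
                                       sparse, G1GCPhaseTimes::MergeRSMergedSparse);
    pt->record_or_add_thread_work_item(G1GCPhaseTimes::MergeRS, worker_id,
                                       fine, G1GCPhaseTimes::MergeRSMergedFine);
    pt->record_or_add_thread_work_item(G1GCPhaseTimes::MergeRS, worker_id,
                                       coarse, G1GCPhaseTimes::MergeRSMergedCoarse);
    // New in this change: cards dirtied while merging remembered sets.
    pt->record_or_add_thread_work_item(G1GCPhaseTimes::MergeRS, worker_id,
                                       dirty_cards, G1GCPhaseTimes::MergeRSDirtyCards);
  }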


 297 }
 298 
 299 double G1GCPhaseTimes::get_time_secs(GCParPhases phase, uint worker_id) {
 300   return _gc_par_phases[phase]->get(worker_id);
 301 }
 302 
 303 void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_id, size_t count, uint index) {
 304   _gc_par_phases[phase]->set_thread_work_item(worker_id, count, index);
 305 }
 306 
 307 void G1GCPhaseTimes::record_or_add_thread_work_item(GCParPhases phase, uint worker_id, size_t count, uint index) {
 308   _gc_par_phases[phase]->set_or_add_thread_work_item(worker_id, count, index);
 309 }
 310 
 311 size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_id, uint index) {
 312   return _gc_par_phases[phase]->get_thread_work_item(worker_id, index);
 313 }
 314 
 315 // return the average time for a phase in milliseconds
 316 double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
 317   if (_gc_par_phases[phase] == NULL) {
 318     return 0.0;
 319   }
 320   return _gc_par_phases[phase]->average() * 1000.0;
 321 }
 322 
 323 size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase, uint index) {
 324   if (_gc_par_phases[phase] == NULL) {
 325     return 0;
 326   }
 327   assert(_gc_par_phases[phase]->thread_work_items(index) != NULL, "No sub count");
 328   return _gc_par_phases[phase]->thread_work_items(index)->sum();
 329 }
 330 
 331 template <class T>
 332 void G1GCPhaseTimes::details(T* phase, const char* indent) const {
 333   LogTarget(Trace, gc, phases, task) lt;
 334   if (lt.is_enabled()) {
 335     LogStream ls(lt);
 336     ls.print("%s", indent);
 337     phase->print_details_on(&ls);
 338   }
 339 }
 340 
 341 void G1GCPhaseTimes::log_phase(WorkerDataArray<double>* phase, uint indent, outputStream* out, bool print_sum) const {
 342   out->print("%s", Indents[indent]);
 343   phase->print_summary_on(out, print_sum);
 344   details(phase, Indents[indent]);
 345 
 346   for (uint i = 0; i < phase->MaxThreadWorkItems; i++) {
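
The NULL checks added to average_time_ms() and sum_thread_work_items() make both accessors safe to call for a phase that was never allocated, e.g. MergeHCC when the hot card cache is disabled; they now report 0 instead of dereferencing NULL. Below is a minimal sketch of a caller relying on that behavior; the helper is hypothetical and assumes #include "logging/log.hpp" in addition to the phase-times header.

  // Hypothetical reporting helper -- not part of this change.
  static void print_merge_summary_sketch(G1GCPhaseTimes* pt) {
    double merge_rs_ms  = pt->average_time_ms(G1GCPhaseTimes::MergeRS);
    double merge_hcc_ms = pt->average_time_ms(G1GCPhaseTimes::MergeHCC);  // 0.0 if the phase is NULL
    size_t dirty_cards  = pt->sum_thread_work_items(G1GCPhaseTimes::MergeRS,
                                                    G1GCPhaseTimes::MergeRSDirtyCards);
    log_debug(gc, phases)("Merge RS %1.2lfms, Hot Card Cache %1.2lfms, " SIZE_FORMAT " dirty cards",
                          merge_rs_ms, merge_hcc_ms, dirty_cards);
  }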

