< prev index next >

src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

Print this page
rev 7854 : imported patch 8027962-per-phase-timing-measurements-for-strong-roots-processing


 334   const char* names[] = {"YOUNG", "SURVIVOR"};
 335 
 336   for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
 337     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
 338     HeapRegion *curr = lists[list];
 339     if (curr == NULL)
 340       gclog_or_tty->print_cr("  empty");
 341     while (curr != NULL) {
 342       gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT ", N: "PTR_FORMAT", age: %4d",
 343                              HR_FORMAT_PARAMS(curr),
 344                              curr->prev_top_at_mark_start(),
 345                              curr->next_top_at_mark_start(),
 346                              curr->age_in_surv_rate_group_cond());
 347       curr = curr->get_next_young_region();
 348     }
 349   }
 350 
 351   gclog_or_tty->cr();
 352 }
 353 

















 // Invalidate the "from" card cache entries for the given range of heap
 // regions [start_idx, start_idx + num_regions). Called when the region
 // mapping changes so stale cached card information is not reused.
 354 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
 355   HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
 356 }
 357 
 // Listener callback fired when the regions [start_idx, start_idx + num_regions)
 // are committed. Only resets the from-card cache for that range; the
 // zero_filled hint is deliberately ignored (see comment below).
 358 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
 359   // The from card cache is not the memory that is actually committed. So we cannot
 360   // take advantage of the zero_filled parameter.
 361   reset_from_card_cache(start_idx, num_regions);
 362 }
 363 
 364 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
 365 {
 366   // Claim the right to put the region on the dirty cards region list
 367   // by installing a self pointer.
 368   HeapRegion* next = hr->get_next_dirty_cards_region();
 369   if (next == NULL) {
 370     HeapRegion* res = (HeapRegion*)
 371       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
 372                           NULL);
 373     if (res == NULL) {


4535         strong_cld_cl  = &scan_mark_cld_cl;
4536         strong_code_cl = &scan_mark_code_cl;
4537         if (ClassUnloadingWithConcurrentMark) {
4538           weak_root_cl = &scan_mark_weak_root_cl;
4539           weak_cld_cl  = &scan_mark_weak_cld_cl;
4540         } else {
4541           weak_root_cl = &scan_mark_root_cl;
4542           weak_cld_cl  = &scan_mark_cld_cl;
4543         }
4544       } else {
4545         strong_root_cl = &scan_only_root_cl;
4546         weak_root_cl   = &scan_only_root_cl;
4547         strong_cld_cl  = &scan_only_cld_cl;
4548         weak_cld_cl    = &scan_only_cld_cl;
4549         strong_code_cl = &scan_only_code_cl;
4550       }
4551 
4552 
4553       G1ParPushHeapRSClosure  push_heap_rs_cl(_g1h, &pss);
4554 



4555       pss.start_strong_roots();
4556       _g1h->g1_process_roots(strong_root_cl,
4557                              weak_root_cl,
4558                              &push_heap_rs_cl,
4559                              strong_cld_cl,
4560                              weak_cld_cl,
4561                              strong_code_cl,
4562                              worker_id);

4563 
4564       pss.end_strong_roots();
4565 
4566       {
4567         double start = os::elapsedTime();
4568         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4569         evac.do_void();
4570         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4571         double term_ms = pss.term_time()*1000.0;
4572         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
4573         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
4574       }
4575       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4576       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4577 
4578       if (PrintTerminationStats) {
4579         MutexLocker x(stats_lock());
4580         pss.print_termination_stats(worker_id);
4581       }
4582 


4587       // "GC Worker Time".
4588     }
4589 
4590     double end_time_ms = os::elapsedTime() * 1000.0;
4591     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4592   }
4593 };
4594 
4595 // *** Common G1 Evacuation Stuff
4596 
4597 // This method is run in a GC worker.
4598 
// Process all root groups for one GC worker during an evacuation pause:
// first the shared (external) roots via process_roots(), then the CM
// reference-processor roots, the weak CLD complement (initial-mark with
// class unloading only), SATB buffer filtering, and finally the remembered
// sets of the collection set. Per-worker timings (ext root scan, object
// copy, SATB filtering) are recorded into the phase times for worker_i.
//
// scan_non_heap_roots      - closure applied to strong non-heap roots
// scan_non_heap_weak_roots - closure applied to weak non-heap roots
// scan_rs                  - closure used for remembered-set scanning
// scan_strong_clds         - closure for strong class loader data
// scan_weak_clds           - closure for weak class loader data (NULL is
//                            passed to process_roots when trace_metadata)
// scan_strong_code         - closure for strong code (nmethod) roots
// worker_i                 - id of the worker thread, used for timing slots
4599 void
4600 G1CollectedHeap::
4601 g1_process_roots(OopClosure* scan_non_heap_roots,
4602                  OopClosure* scan_non_heap_weak_roots,
4603                  G1ParPushHeapRSClosure* scan_rs,
4604                  CLDClosure* scan_strong_clds,
4605                  CLDClosure* scan_weak_clds,
4606                  CodeBlobClosure* scan_strong_code,
4607                  uint worker_i) {

4608 
4609   // First scan the shared roots.
4610   double ext_roots_start = os::elapsedTime();
     // NOTE(review): closure_app_time_sec is declared but never read in this
     // function (obj_copy_time_sec below carries the closure-application
     // time) — looks like dead code; confirm before removing.
4611   double closure_app_time_sec = 0.0;
4612 
4613   bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
4614   bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
4615 
     // Buffering closures batch root oops so closure application time can be
     // measured separately (reported below as object copy time).
4616   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4617   BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
4618 
4619   process_roots(false, // no scoping; this is parallel code
4620                 SharedHeap::SO_None,
4621                 &buf_scan_non_heap_roots,
4622                 &buf_scan_non_heap_weak_roots,
4623                 scan_strong_clds,
4624                 // Unloading Initial Marks handle the weak CLDs separately.
4625                 (trace_metadata ? NULL : scan_weak_clds),
4626                 scan_strong_code);

4627 
4628   // Now the CM ref_processor roots.


4629   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4630     // We need to treat the discovered reference lists of the
4631     // concurrent mark ref processor as roots and keep entries
4632     // (which are added by the marking threads) on them live
4633     // until they can be processed at the end of marking.
4634     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
4635   }

4636 


4637   if (trace_metadata) {
4638     // Barrier to make sure all workers passed
4639     // the strong CLD and strong nmethods phases.
4640     active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());


4641 



4642     // Now take the complement of the strong CLDs.
4643     ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
4644   }

4645 
4646   // Finish up any enqueued closure apps (attributed as object copy time).
4647   buf_scan_non_heap_roots.done();
4648   buf_scan_non_heap_weak_roots.done();
4649 
4650   double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
4651       + buf_scan_non_heap_weak_roots.closure_app_seconds();
4652 
4653   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
4654 
     // External root scan time = wall time since ext_roots_start minus the
     // closure-application time already attributed to object copy above.
4655   double ext_root_time_ms =
4656     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4657 
4658   g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
4659 
4660   // During conc marking we have to filter the per-thread SATB buffers
4661   // to make sure we remove any oops into the CSet (which will show up
4662   // as implicitly live).
4663   double satb_filtering_ms = 0.0;


4664   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
4665     if (mark_in_progress()) {
4666       double satb_filter_start = os::elapsedTime();
4667 
4668       JavaThread::satb_mark_queue_set().filter_thread_buffers();
4669 
4670       satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;

4671     }
4672   }
     // Recorded unconditionally: workers that did not claim the task report 0.0.
4673   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
4674 
4675   // Now scan the complement of the collection set.
4676   G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
4677 
4678   g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4679 
4680   _process_strong_tasks->all_tasks_completed();
4681 }
4682 
4683 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4684 private:
4685   BoolObjectClosure* _is_alive;
4686   int _initial_string_table_size;
4687   int _initial_symbol_table_size;
4688 
4689   bool  _process_strings;
4690   int _strings_processed;




 334   const char* names[] = {"YOUNG", "SURVIVOR"};
 335 
 336   for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
 337     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
 338     HeapRegion *curr = lists[list];
 339     if (curr == NULL)
 340       gclog_or_tty->print_cr("  empty");
 341     while (curr != NULL) {
 342       gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT ", N: "PTR_FORMAT", age: %4d",
 343                              HR_FORMAT_PARAMS(curr),
 344                              curr->prev_top_at_mark_start(),
 345                              curr->next_top_at_mark_start(),
 346                              curr->age_in_surv_rate_group_cond());
 347       curr = curr->get_next_young_region();
 348     }
 349   }
 350 
 351   gclog_or_tty->cr();
 352 }
 353 
 // Human-readable names for the G1-specific external root-scan sub-phases,
 // used when reporting per-phase timing measurements. The entries must stay
 // in the same order as the corresponding G1H_PS_* task ids, since
 // ext_roots_task_string() indexes this array by (task id - SH_PS_NumElements).
 354 static const char* g1_ext_root_task_strings[G1CollectedHeap::G1H_PS_NumElements] = {
 355   "Filter SATB Roots (ms)",
 356   "CM RefProcessor Roots (ms)",
 357   "Wait For Strong CLD (ms)",
 358   "Weak CLD Roots (ms)"
 359 };
 360 
 // Return the display name for external-root task i. Indices below
 // SH_PS_NumElements belong to the shared-heap phases and are delegated to
 // SharedHeap; the remainder are G1-specific and looked up locally.
 // NOTE(review): SH_PS_NumElements is unqualified here but qualified as
 // SharedHeap::SH_PS_NumElements on the next line — presumably both resolve
 // to the same inherited constant; confirm and make consistent.
 361 const char* G1CollectedHeap::ext_roots_task_string(uint i) {
 362   vmassert(i < num_ext_root_tasks(), "must be");
 363   if (i < SH_PS_NumElements) {
 364     return SharedHeap::ext_roots_task_str(i);
 365   } else {
 366     return g1_ext_root_task_strings[i - SharedHeap::SH_PS_NumElements];
 367   }
 368 }
 369 
 370 
 // Invalidate the "from" card cache entries for the given range of heap
 // regions [start_idx, start_idx + num_regions). Called when the region
 // mapping changes so stale cached card information is not reused.
 371 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
 372   HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
 373 }
 374 
 // Listener callback fired when the regions [start_idx, start_idx + num_regions)
 // are committed. Only resets the from-card cache for that range; the
 // zero_filled hint is deliberately ignored (see comment below).
 375 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
 376   // The from card cache is not the memory that is actually committed. So we cannot
 377   // take advantage of the zero_filled parameter.
 378   reset_from_card_cache(start_idx, num_regions);
 379 }
 380 
 381 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
 382 {
 383   // Claim the right to put the region on the dirty cards region list
 384   // by installing a self pointer.
 385   HeapRegion* next = hr->get_next_dirty_cards_region();
 386   if (next == NULL) {
 387     HeapRegion* res = (HeapRegion*)
 388       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
 389                           NULL);
 390     if (res == NULL) {


4552         strong_cld_cl  = &scan_mark_cld_cl;
4553         strong_code_cl = &scan_mark_code_cl;
4554         if (ClassUnloadingWithConcurrentMark) {
4555           weak_root_cl = &scan_mark_weak_root_cl;
4556           weak_cld_cl  = &scan_mark_weak_cld_cl;
4557         } else {
4558           weak_root_cl = &scan_mark_root_cl;
4559           weak_cld_cl  = &scan_mark_cld_cl;
4560         }
4561       } else {
4562         strong_root_cl = &scan_only_root_cl;
4563         weak_root_cl   = &scan_only_root_cl;
4564         strong_cld_cl  = &scan_only_cld_cl;
4565         weak_cld_cl    = &scan_only_cld_cl;
4566         strong_code_cl = &scan_only_code_cl;
4567       }
4568 
4569 
4570       G1ParPushHeapRSClosure  push_heap_rs_cl(_g1h, &pss);
4571 
4572       G1GCPhaseTimes* pt = _g1h->g1_policy()->phase_times();
4573       GCPhaseTimeTracker phase_tracker(pt->get_ext_root_scan_phase_times(), pt->num_ext_root_scan_phases(), worker_id);
4574 
4575       pss.start_strong_roots();
4576       _g1h->g1_process_roots(strong_root_cl,
4577                              weak_root_cl,
4578                              &push_heap_rs_cl,
4579                              strong_cld_cl,
4580                              weak_cld_cl,
4581                              strong_code_cl,
4582                              worker_id,
4583                              &phase_tracker);
4584 
4585       pss.end_strong_roots();
4586 
4587       {
4588         double start = os::elapsedTime();
4589         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4590         evac.do_void();
4591         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4592         double term_ms = pss.term_time()*1000.0;
4593         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
4594         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
4595       }
4596       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4597       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4598 
4599       if (PrintTerminationStats) {
4600         MutexLocker x(stats_lock());
4601         pss.print_termination_stats(worker_id);
4602       }
4603 


4608       // "GC Worker Time".
4609     }
4610 
4611     double end_time_ms = os::elapsedTime() * 1000.0;
4612     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4613   }
4614 };
4615 
4616 // *** Common G1 Evacuation Stuff
4617 
4618 // This method is run in a GC worker.
4619 
// Process all root groups for one GC worker during an evacuation pause:
// shared (external) roots via process_roots(), the CM reference-processor
// roots, a barrier plus the weak CLD complement (initial-mark with class
// unloading only), SATB buffer filtering, and finally the remembered sets
// of the collection set. In addition to the per-worker totals (ext root
// scan, object copy, SATB filtering), individual sub-phases are timed via
// phase_tracker: each TrackPhaseTime scope below records one sub-phase,
// indexed as SharedHeap::SH_PS_NumElements + <G1H_PS_* task id> so G1
// phases follow the shared-heap phases in the same table.
//
// scan_non_heap_roots      - closure applied to strong non-heap roots
// scan_non_heap_weak_roots - closure applied to weak non-heap roots
// scan_rs                  - closure used for remembered-set scanning
// scan_strong_clds         - closure for strong class loader data
// scan_weak_clds           - closure for weak class loader data (NULL is
//                            passed to process_roots when trace_metadata)
// scan_strong_code         - closure for strong code (nmethod) roots
// worker_i                 - id of the worker thread, used for timing slots
// phase_tracker            - collector for per-sub-phase timing; also passed
//                            down to process_roots for the shared phases
4620 void
4621 G1CollectedHeap::
4622 g1_process_roots(OopClosure* scan_non_heap_roots,
4623                  OopClosure* scan_non_heap_weak_roots,
4624                  G1ParPushHeapRSClosure* scan_rs,
4625                  CLDClosure* scan_strong_clds,
4626                  CLDClosure* scan_weak_clds,
4627                  CodeBlobClosure* scan_strong_code,
4628                  uint worker_i,
4629                  GCPhaseTimeTracker* phase_tracker) {
4630 
4631   // First scan the shared roots.
4632   double ext_roots_start = os::elapsedTime();
     // NOTE(review): closure_app_time_sec is declared but never read in this
     // function (obj_copy_time_sec below carries the closure-application
     // time) — looks like dead code; confirm before removing.
4633   double closure_app_time_sec = 0.0;
4634 
4635   bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
4636   bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
4637 
     // Buffering closures batch root oops so closure application time can be
     // measured separately (reported below as object copy time).
4638   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4639   BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
4640 
4641   process_roots(false, // no scoping; this is parallel code
4642                 SharedHeap::SO_None,
4643                 &buf_scan_non_heap_roots,
4644                 &buf_scan_non_heap_weak_roots,
4645                 scan_strong_clds,
4646                 // Unloading Initial Marks handle the weak CLDs separately.
4647                 (trace_metadata ? NULL : scan_weak_clds),
4648                 scan_strong_code,
4649                 phase_tracker);
4650 
4651   // Now the CM ref_processor roots.
     // Each block below is scoped so the TrackPhaseTime RAII object times the
     // whole sub-phase, including the (unclaimed-task / not-applicable) case
     // — presumably so every worker reports a value for every sub-phase;
     // confirm against GCPhaseTimeTracker.
4652   {
4653     TrackPhaseTime x(phase_tracker, SharedHeap::SH_PS_NumElements + G1H_PS_refProcessor_oops_do);
4654     if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4655       // We need to treat the discovered reference lists of the
4656       // concurrent mark ref processor as roots and keep entries
4657       // (which are added by the marking threads) on them live
4658       // until they can be processed at the end of marking.
4659       ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
4660     }
4661   }
4662 
4663   {
4664     TrackPhaseTime x(phase_tracker, SharedHeap::SH_PS_NumElements + G1H_PS_wait_strong_cld_nmethods);
4665     if (trace_metadata) {
4666       // Barrier to make sure all workers passed
4667       // the strong CLD and strong nmethods phases.
4668       active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
4669     }
4670   }
4671 
4672   {
4673     TrackPhaseTime x(phase_tracker, SharedHeap::SH_PS_NumElements + G1H_PS_weak_clds_oops_do);
4674     if (trace_metadata) {
4675       // Now take the complement of the strong CLDs.
4676       ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
4677     }
4678   }
4679 
4680   // Finish up any enqueued closure apps (attributed as object copy time).
4681   buf_scan_non_heap_roots.done();
4682   buf_scan_non_heap_weak_roots.done();
4683 
4684   double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
4685       + buf_scan_non_heap_weak_roots.closure_app_seconds();
4686 
4687   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
4688 
     // External root scan time = wall time since ext_roots_start minus the
     // closure-application time already attributed to object copy above.
4689   double ext_root_time_ms =
4690     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4691 
4692   g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
4693 
4694   // During conc marking we have to filter the per-thread SATB buffers
4695   // to make sure we remove any oops into the CSet (which will show up
4696   // as implicitly live).
4697   double satb_filtering_ms = 0.0;
4698   {
4699     TrackPhaseTime x(phase_tracker, SharedHeap::SH_PS_NumElements + G1H_PS_filter_satb_buffers);
4700     if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
4701       if (mark_in_progress()) {
4702         double satb_filter_start = os::elapsedTime();
4703 
4704         JavaThread::satb_mark_queue_set().filter_thread_buffers();
4705 
4706         satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
4707       }
4708     }
4709   }
     // Recorded unconditionally: workers that did not claim the task report 0.0.
4710   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
4711 
4712   // Now scan the complement of the collection set.
4713   G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
4714 
4715   g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4716 
4717   _process_strong_tasks->all_tasks_completed();
4718 }
4719 
4720 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4721 private:
4722   BoolObjectClosure* _is_alive;
4723   int _initial_string_table_size;
4724   int _initial_symbol_table_size;
4725 
4726   bool  _process_strings;
4727   int _strings_processed;


< prev index next >