src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

2189 
2190   bool failures() { return _failures; }
2191 };
2192 
2193 void G1CollectedHeap::check_gc_time_stamps() {
2194   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2195   heap_region_iterate(&cl);
2196   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2197 }
2198 #endif // PRODUCT
2199 
2200 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2201                                                  DirtyCardQueue* into_cset_dcq,
2202                                                  bool concurrent,
2203                                                  uint worker_i) {
2204   // Clean cards in the hot card cache
2205   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
2206   hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
2207 
2208   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2209   int n_completed_buffers = 0;
2210   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2211     n_completed_buffers++;
2212   }
2213   g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
2214   dcqs.clear_n_completed_buffers();
2215   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2216 }
2217 
2218 
2219 // Computes the sum of the storage used by the various regions.
2220 size_t G1CollectedHeap::used() const {
2221   return _allocator->used();
2222 }
2223 
2224 size_t G1CollectedHeap::used_unlocked() const {
2225   return _allocator->used_unlocked();
2226 }
2227 
2228 class SumUsedClosure: public HeapRegionClosure {
2229   size_t _used;
2230 public:
2231   SumUsedClosure() : _used(0) {}
2232   bool doHeapRegion(HeapRegion* r) {
2233     if (!r->is_continues_humongous()) {
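
Note that the diff frame truncates SumUsedClosure at this point. A minimal sketch of how such a summing closure typically completes, assuming HeapRegion::used() returns the bytes in use for one region and that the HeapRegionClosure contract is "return false to keep iterating" (the body below is an inference from the visible lines, not verified against the full file):

class SumUsedClosure: public HeapRegionClosure {
  size_t _used;
public:
  SumUsedClosure() : _used(0) {}
  bool doHeapRegion(HeapRegion* r) {
    // Continuation regions of a humongous object are skipped; their space
    // is assumed to be accounted against the starts-humongous region.
    if (!r->is_continues_humongous()) {
      _used += r->used();
    }
    return false; // false == continue the heap region iteration
  }
  size_t result() { return _used; }
};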


3734   // Record whether this pause is an initial mark. When the current
3735   // thread has completed its logging output and it's safe to signal
3736   // the CM thread, the flag's value in the policy has been reset.
3737   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3738 
3739   // Inner scope for scope based logging, timers, and stats collection
3740   {
3741     EvacuationInfo evacuation_info;
3742 
3743     if (g1_policy()->during_initial_mark_pause()) {
3744       // We are about to start a marking cycle, so we increment the
3745       // full collection counter.
3746       increment_old_marking_cycles_started();
3747       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3748     }
3749 
3750     _gc_tracer_stw->report_yc_type(yc_type());
3751 
3752     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3753 
3754     int active_workers = workers()->active_workers();
3755     double pause_start_sec = os::elapsedTime();
3756     g1_policy()->phase_times()->note_gc_start(active_workers);
3757     log_gc_header();
3758 
3759     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3760     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3761 
3762     // If the secondary_free_list is not empty, append it to the
3763     // free_list. No need to wait for the cleanup operation to finish;
3764     // the region allocation code will check the secondary_free_list
3765     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3766     // set, skip this step so that the region allocation code has to
3767     // get entries from the secondary_free_list.
3768     if (!G1StressConcRegionFreeing) {
3769       append_secondary_free_list_if_not_empty_with_lock();
3770     }
3771 
3772     assert(check_young_list_well_formed(), "young list should be well formed");
3773 
3774     // Don't dynamically change the number of GC threads this early.  A value of
3775     // 0 is used to indicate serial work.  When parallel work is done,
3776     // it will be set.
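
The "0 means serial, non-zero means parallel" convention described above is visible later on this page wherever a gang task runs: the driver brackets run_task() with set_par_threads() calls. The pattern, extracted here for reference (some_task stands for any AbstractGangTask):

  uint n_workers = workers()->active_workers();
  set_par_threads(n_workers);      // announce parallel work
  workers()->run_task(&some_task); // workers see n_par_threads() != 0
  set_par_threads(0);              // restore the serial indication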


4469    public:
4470     G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
4471                  bool only_young, bool claim)
4472         : _oop_closure(oop_closure),
4473           _oop_in_klass_closure(oop_closure->g1(),
4474                                 oop_closure->pss(),
4475                                 oop_closure->rp()),
4476           _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
4477           _claim(claim) {
4478 
4479     }
4480 
4481     void do_cld(ClassLoaderData* cld) {
4482       cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
4483     }
4484   };
4485 
4486   void work(uint worker_id) {
4487     if (worker_id >= _n_workers) return;  // no work needed this round
4488 
4489     double start_time_ms = os::elapsedTime() * 1000.0;
4490     _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
4491 
4492     {
4493       ResourceMark rm;
4494       HandleMark   hm;
4495 
4496       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
4497 
4498       G1ParScanThreadState            pss(_g1h, worker_id, rp);
4499       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4500 
4501       pss.set_evac_failure_closure(&evac_failure_cl);
4502 
4503       bool only_young = _g1h->g1_policy()->gcs_are_young();
4504 
4505       // Non-IM young GC.
4506       G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
4507       G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
4508                                                                                only_young, // Only process dirty klasses.
4509                                                                                false);     // No need to claim CLDs.
4510       // IM young GC.


4550       }
4551 
4552 
4553       G1ParPushHeapRSClosure  push_heap_rs_cl(_g1h, &pss);
4554 
4555       pss.start_strong_roots();
4556       _g1h->g1_process_roots(strong_root_cl,
4557                              weak_root_cl,
4558                              &push_heap_rs_cl,
4559                              strong_cld_cl,
4560                              weak_cld_cl,
4561                              strong_code_cl,
4562                              worker_id);
4563 
4564       pss.end_strong_roots();
4565 
4566       {
4567         double start = os::elapsedTime();
4568         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4569         evac.do_void();
4570         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4571         double term_ms = pss.term_time()*1000.0;
4572         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
4573         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());

4574       }
4575       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4576       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4577 
4578       if (PrintTerminationStats) {
4579         MutexLocker x(stats_lock());
4580         pss.print_termination_stats(worker_id);
4581       }
4582 
4583       assert(pss.queue_is_empty(), "should be empty");
4584 
4585       // Close the inner scope so that the ResourceMark and HandleMark
4586       // destructors are executed here and are included as part of the
4587       // "GC Worker Time".
4588     }
4589 
4590     double end_time_ms = os::elapsedTime() * 1000.0;
4591     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4592   }
4593 };
4594 
4595 // *** Common G1 Evacuation Stuff
4596 
4597 // This method is run in a GC worker.
4598 
4599 void
4600 G1CollectedHeap::
4601 g1_process_roots(OopClosure* scan_non_heap_roots,
4602                  OopClosure* scan_non_heap_weak_roots,
4603                  G1ParPushHeapRSClosure* scan_rs,
4604                  CLDClosure* scan_strong_clds,
4605                  CLDClosure* scan_weak_clds,
4606                  CodeBlobClosure* scan_strong_code,
4607                  uint worker_i) {
4608 
4609   // First scan the shared roots.
4610   double ext_roots_start = os::elapsedTime();
4611   double closure_app_time_sec = 0.0;


4633     // until they can be processed at the end of marking.
4634     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
4635   }
4636 
4637   if (trace_metadata) {
4638     // Barrier to make sure all workers passed
4639     // the strong CLD and strong nmethods phases.
4640     active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
4641 
4642     // Now take the complement of the strong CLDs.
4643     ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
4644   }
4645 
4646   // Finish up any enqueued closure apps (attributed as object copy time).
4647   buf_scan_non_heap_roots.done();
4648   buf_scan_non_heap_weak_roots.done();
4649 
4650   double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
4651       + buf_scan_non_heap_weak_roots.closure_app_seconds();
4652 
4653   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
4654 
4655   double ext_root_time_ms =
4656     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4657 
4658   g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);

4659 
4660   // During conc marking we have to filter the per-thread SATB buffers
4661   // to make sure we remove any oops into the CSet (which will show up
4662   // as implicitly live).
4663   double satb_filtering_ms = 0.0;
4664   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
4665     if (mark_in_progress()) {
4666       double satb_filter_start = os::elapsedTime();
4667 
4668       JavaThread::satb_mark_queue_set().filter_thread_buffers();
4669 
4670       satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
4671     }
4672   }
4673   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
4674 
4675   // Now scan the complement of the collection set.
4676   G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
4677 
4678   g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4679 
4680   _process_strong_tasks->all_tasks_completed();
4681 }
4682 
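
The external-root time computed at lines 4655-4656 above subtracts obj_copy_time_sec because applying the buffered oop closures is really object-copy work, and it was already credited to the obj-copy bucket at line 4653. A worked example with purely illustrative numbers:

// Illustrative numbers only:
double ext_roots_start   = 12.000; // os::elapsedTime() at entry
double now               = 12.120; // after both buffered closures are drained
double obj_copy_time_sec = 0.045;  // closure_app_seconds() of both buffers
// 120 ms elapsed in total, of which 45 ms was object copy work, so only
// 75 ms is attributed to external root scanning:
double ext_root_time_ms  = ((now - ext_roots_start) - obj_copy_time_sec) * 1000.0;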
4683 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4684 private:
4685   BoolObjectClosure* _is_alive;
4686   int _initial_string_table_size;
4687   int _initial_symbol_table_size;
4688 
4689   bool  _process_strings;
4690   int _strings_processed;
4691   int _strings_removed;
4692 
4693   bool  _process_symbols;


5056   {
5057     uint n_workers = _g1h->workers()->active_workers();
5058     G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
5059     set_par_threads(n_workers);
5060     workers()->run_task(&g1_unlink_task);
5061     set_par_threads(0);
5062   }
5063 
5064   if (G1StringDedup::is_enabled()) {
5065     G1StringDedup::unlink(is_alive);
5066   }
5067 }
5068 
5069 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
5070  private:
5071   DirtyCardQueueSet* _queue;
5072  public:
5073   G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
5074 
5075   virtual void work(uint worker_id) {
5076     double start_time = os::elapsedTime();

5077 
5078     RedirtyLoggedCardTableEntryClosure cl;
5079     _queue->par_apply_closure_to_all_completed_buffers(&cl);
5080 
5081     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
5082     timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
5083     timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
5084   }
5085 };
5086 
5087 void G1CollectedHeap::redirty_logged_cards() {
5088   double redirty_logged_cards_start = os::elapsedTime();
5089 
5090   uint n_workers = _g1h->workers()->active_workers();
5091 
5092   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
5093   dirty_card_queue_set().reset_for_par_iteration();
5094   set_par_threads(n_workers);
5095   workers()->run_task(&redirty_task);
5096   set_par_threads(0);
5097 
5098   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5099   dcq.merge_bufferlists(&dirty_card_queue_set());
5100   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5101 
5102   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
5103 }


5641     if (g1_policy()->during_initial_mark_pause()) {
5642       ClassLoaderDataGraph::clear_claimed_marks();
5643     }
5644 
5645      // The individual threads will set their evac-failure closures.
5646      if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
5647      // These tasks use SharedHeap::_process_strong_tasks
5648      assert(UseDynamicNumberOfGCThreads ||
5649             workers()->active_workers() == workers()->total_workers(),
5650             "If not dynamic should be using all the workers");
5651     workers()->run_task(&g1_par_task);
5652     end_par_time_sec = os::elapsedTime();
5653 
5654     // Closing the inner scope will execute the destructor
5655     // for the StrongRootsScope object. We record the current
5656     // elapsed time before closing the scope so that time
5657     // taken for the SRS destructor is NOT included in the
5658     // reported parallel time.
5659   }
5660 


5661   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5662   g1_policy()->phase_times()->record_par_time(par_time_ms);
5663 
5664   double code_root_fixup_time_ms =
5665         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5666   g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
5667 
5668   set_par_threads(0);
5669 
5670   // Process any discovered reference objects - we have
5671   // to do this _before_ we retire the GC alloc regions
5672   // as we may have to copy some 'reachable' referent
5673   // objects (and their reachable sub-graphs) that were
5674   // not copied during the pause.
5675   process_discovered_references(n_workers);
5676 
5677   if (G1StringDedup::is_enabled()) {


5678     G1STWIsAliveClosure is_alive(this);
5679     G1KeepAliveClosure keep_alive(this);
5680     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);



5681   }
5682 
5683   _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
5684   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5685 
5686   // Reset and re-enable the hot card cache.
5687   // Note the counts for the cards in the regions in the
5688   // collection set are reset when the collection set is freed.
5689   hot_card_cache->reset_hot_cache();
5690   hot_card_cache->set_use_cache(true);
5691 
5692   purge_code_root_memory();
5693 
5694   finalize_for_evac_failure();
5695 
5696   if (evacuation_failed()) {
5697     remove_self_forwarding_pointers();
5698 
5699     // Reset the G1EvacuationFailureALot counters and flags
5700     // Note: the values are reset only when an actual




2189 
2190   bool failures() { return _failures; }
2191 };
2192 
2193 void G1CollectedHeap::check_gc_time_stamps() {
2194   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2195   heap_region_iterate(&cl);
2196   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2197 }
2198 #endif // PRODUCT
2199 
2200 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2201                                                  DirtyCardQueue* into_cset_dcq,
2202                                                  bool concurrent,
2203                                                  uint worker_i) {
2204   // Clean cards in the hot card cache
2205   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
2206   hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
2207 
2208   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2209   size_t n_completed_buffers = 0;
2210   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2211     n_completed_buffers++;
2212   }
2213   g1_policy()->phase_times()->record_sub_count(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
2214   dcqs.clear_n_completed_buffers();
2215   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2216 }
2217 
2218 
2219 // Computes the sum of the storage used by the various regions.
2220 size_t G1CollectedHeap::used() const {
2221   return _allocator->used();
2222 }
2223 
2224 size_t G1CollectedHeap::used_unlocked() const {
2225   return _allocator->used_unlocked();
2226 }
2227 
2228 class SumUsedClosure: public HeapRegionClosure {
2229   size_t _used;
2230 public:
2231   SumUsedClosure() : _used(0) {}
2232   bool doHeapRegion(HeapRegion* r) {
2233     if (!r->is_continues_humongous()) {
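
In this new version the phase-specific record_update_rs_processed_buffers() call has become the generic, enum-indexed record_sub_count(G1GCPhaseTimes::UpdateRS, ...). A plausible shape for the per-worker store behind such generic calls, sketched under the assumption that each phase keeps one slot per worker (WorkerDataArray and its members are illustrative names, not verified against the patch):

typedef unsigned int uint; // as in HotSpot's globalDefinitions

// One instance per phase; indexed by worker id.
template <class T>
class WorkerDataArray {
  T*   _data;
  uint _length;
public:
  WorkerDataArray(uint length) : _data(new T[length]()), _length(length) {}
  ~WorkerDataArray() { delete[] _data; }
  void set(uint worker_i, T value) { _data[worker_i] = value; }  // record_*
  void add(uint worker_i, T value) { _data[worker_i] += value; } // add_*
  T get(uint worker_i) const { return _data[worker_i]; }
};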


3734   // Record whether this pause is an initial mark. When the current
3735   // thread has completed its logging output and it's safe to signal
3736   // the CM thread, the flag's value in the policy has been reset.
3737   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3738 
3739   // Inner scope for scope based logging, timers, and stats collection
3740   {
3741     EvacuationInfo evacuation_info;
3742 
3743     if (g1_policy()->during_initial_mark_pause()) {
3744       // We are about to start a marking cycle, so we increment the
3745       // full collection counter.
3746       increment_old_marking_cycles_started();
3747       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3748     }
3749 
3750     _gc_tracer_stw->report_yc_type(yc_type());
3751 
3752     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3753 
3754     uint active_workers = workers()->active_workers();
3755     double pause_start_sec = os::elapsedTime();
3756     g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());
3757     log_gc_header();
3758 
3759     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3760     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3761 
3762     // If the secondary_free_list is not empty, append it to the
3763     // free_list. No need to wait for the cleanup operation to finish;
3764     // the region allocation code will check the secondary_free_list
3765     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3766     // set, skip this step so that the region allocation code has to
3767     // get entries from the secondary_free_list.
3768     if (!G1StressConcRegionFreeing) {
3769       append_secondary_free_list_if_not_empty_with_lock();
3770     }
3771 
3772     assert(check_young_list_well_formed(), "young list should be well formed");
3773 
3774     // Don't dynamically change the number of GC threads this early.  A value of
3775     // 0 is used to indicate serial work.  When parallel work is done,
3776     // it will be set.


4469    public:
4470     G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
4471                  bool only_young, bool claim)
4472         : _oop_closure(oop_closure),
4473           _oop_in_klass_closure(oop_closure->g1(),
4474                                 oop_closure->pss(),
4475                                 oop_closure->rp()),
4476           _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
4477           _claim(claim) {
4478 
4479     }
4480 
4481     void do_cld(ClassLoaderData* cld) {
4482       cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
4483     }
4484   };
4485 
4486   void work(uint worker_id) {
4487     if (worker_id >= _n_workers) return;  // no work needed this round
4488 
4489     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, os::elapsedTime());

4490 
4491     {
4492       ResourceMark rm;
4493       HandleMark   hm;
4494 
4495       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
4496 
4497       G1ParScanThreadState            pss(_g1h, worker_id, rp);
4498       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4499 
4500       pss.set_evac_failure_closure(&evac_failure_cl);
4501 
4502       bool only_young = _g1h->g1_policy()->gcs_are_young();
4503 
4504       // Non-IM young GC.
4505       G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
4506       G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
4507                                                                                only_young, // Only process dirty klasses.
4508                                                                                false);     // No need to claim CLDs.
4509       // IM young GC.


4549       }
4550 
4551 
4552       G1ParPushHeapRSClosure  push_heap_rs_cl(_g1h, &pss);
4553 
4554       pss.start_strong_roots();
4555       _g1h->g1_process_roots(strong_root_cl,
4556                              weak_root_cl,
4557                              &push_heap_rs_cl,
4558                              strong_cld_cl,
4559                              weak_cld_cl,
4560                              strong_code_cl,
4561                              worker_id);
4562 
4563       pss.end_strong_roots();
4564 
4565       {
4566         double start = os::elapsedTime();
4567         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4568         evac.do_void();
4569         double elapsed_sec = os::elapsedTime() - start;
4570         double term_sec = pss.term_time();
4571         _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4572         _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4573         _g1h->g1_policy()->phase_times()->record_sub_count(G1GCPhaseTimes::Termination, worker_id, pss.term_attempts());
4574       }
4575       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4576       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4577 
4578       if (PrintTerminationStats) {
4579         MutexLocker x(stats_lock());
4580         pss.print_termination_stats(worker_id);
4581       }
4582 
4583       assert(pss.queue_is_empty(), "should be empty");
4584 
4585       // Close the inner scope so that the ResourceMark and HandleMark
4586       // destructors are executed here and are included as part of the
4587       // "GC Worker Time".
4588     }
4589     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());


4590   }
4591 };
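
In the evacuation block above, ObjCopy is updated with add_time_secs() while Termination uses record_time_secs(). A likely reason, inferred from this page rather than stated in it: g1_process_roots() has already recorded this worker's buffered-closure application time into ObjCopy (line 4651 below), so the evacuation loop's share must accumulate rather than overwrite. Reusing the WorkerDataArray sketch from earlier, the two flavors would differ only in set versus add:

// Sketch only; the GCParPhases values and _gc_par_phases are assumed names.
void record_time_secs(GCParPhases phase, uint worker_i, double secs) {
  _gc_par_phases[phase]->set(worker_i, secs); // overwrite with a fresh value
}
void add_time_secs(GCParPhases phase, uint worker_i, double secs) {
  _gc_par_phases[phase]->add(worker_i, secs); // accumulate onto what is there
}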
4592 
4593 // *** Common G1 Evacuation Stuff
4594 
4595 // This method is run in a GC worker.
4596 
4597 void
4598 G1CollectedHeap::
4599 g1_process_roots(OopClosure* scan_non_heap_roots,
4600                  OopClosure* scan_non_heap_weak_roots,
4601                  G1ParPushHeapRSClosure* scan_rs,
4602                  CLDClosure* scan_strong_clds,
4603                  CLDClosure* scan_weak_clds,
4604                  CodeBlobClosure* scan_strong_code,
4605                  uint worker_i) {
4606 
4607   // First scan the shared roots.
4608   double ext_roots_start = os::elapsedTime();
4609   double closure_app_time_sec = 0.0;


4631     // until they can be processed at the end of marking.
4632     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
4633   }
4634 
4635   if (trace_metadata) {
4636     // Barrier to make sure all workers passed
4637     // the strong CLD and strong nmethods phases.
4638     active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
4639 
4640     // Now take the complement of the strong CLDs.
4641     ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
4642   }
4643 
4644   // Finish up any enqueued closure apps (attributed as object copy time).
4645   buf_scan_non_heap_roots.done();
4646   buf_scan_non_heap_weak_roots.done();
4647 
4648   double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
4649       + buf_scan_non_heap_weak_roots.closure_app_seconds();
4650 
4651   g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, obj_copy_time_sec);



4652 
4653   double ext_root_time_sec = os::elapsedTime() - ext_roots_start - obj_copy_time_sec;
4654   g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::ExtRootScan, worker_i, ext_root_time_sec);
4655 
4656   // During conc marking we have to filter the per-thread SATB buffers
4657   // to make sure we remove any oops into the CSet (which will show up
4658   // as implicitly live).
4659   {
4660     G1GCParPhaseTimesTracker x(g1_policy()->phase_times(), G1GCPhaseTimes::SATBFiltering, worker_i);
4661     if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers) && mark_in_progress()) {


4662       JavaThread::satb_mark_queue_set().filter_thread_buffers();


4663     }
4664   }

4665 
4666   // Now scan the complement of the collection set.
4667   G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
4668 
4669   g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4670 
4671   _process_strong_tasks->all_tasks_completed();
4672 }
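
The SATBFiltering block above times its scope with G1GCParPhaseTimesTracker instead of the old explicit start/stop arithmetic. A minimal sketch of such an RAII tracker, assuming it records the scope's elapsed wall-clock time into the given phase when it is destroyed:

class G1GCParPhaseTimesTracker {
  G1GCPhaseTimes*             _phase_times;
  G1GCPhaseTimes::GCParPhases _phase;
  uint                        _worker_id;
  double                      _start_time;
public:
  G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times,
                           G1GCPhaseTimes::GCParPhases phase,
                           uint worker_id)
    : _phase_times(phase_times), _phase(phase), _worker_id(worker_id),
      _start_time(os::elapsedTime()) { }
  ~G1GCParPhaseTimesTracker() {
    _phase_times->record_time_secs(_phase, _worker_id,
                                   os::elapsedTime() - _start_time);
  }
};

Since the tracker surrounds the is_task_claimed() check, every worker now records a SATBFiltering value (near zero for workers that skip the filtering), which matches the old code's unconditional record_satb_filtering_time() call with its 0.0 default.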
4673 
4674 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4675 private:
4676   BoolObjectClosure* _is_alive;
4677   int _initial_string_table_size;
4678   int _initial_symbol_table_size;
4679 
4680   bool  _process_strings;
4681   int _strings_processed;
4682   int _strings_removed;
4683 
4684   bool  _process_symbols;


5047   {
5048     uint n_workers = _g1h->workers()->active_workers();
5049     G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
5050     set_par_threads(n_workers);
5051     workers()->run_task(&g1_unlink_task);
5052     set_par_threads(0);
5053   }
5054 
5055   if (G1StringDedup::is_enabled()) {
5056     G1StringDedup::unlink(is_alive);
5057   }
5058 }
5059 
5060 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
5061  private:
5062   DirtyCardQueueSet* _queue;
5063  public:
5064   G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
5065 
5066   virtual void work(uint worker_id) {
5067     G1GCPhaseTimes* phase_times = G1CollectedHeap::heap()->g1_policy()->phase_times();
5068     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
5069 
5070     RedirtyLoggedCardTableEntryClosure cl;
5071     _queue->par_apply_closure_to_all_completed_buffers(&cl);
5072 
5073     phase_times->record_sub_count(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_processed());


5074   }
5075 };
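
Compared with the old version of this task (lines 5075-5084 in the first half of this page), which timed work() by hand and then called record_redirty_logged_cards_time_ms(), the new version leans on the tracker's destructor. The resulting shape, reusing the tracker sketched earlier:

virtual void work(uint worker_id) {
  G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
  // ... phase body; the elapsed time is recorded when x goes out of scope.
}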
5076 
5077 void G1CollectedHeap::redirty_logged_cards() {
5078   double redirty_logged_cards_start = os::elapsedTime();
5079 
5080   uint n_workers = _g1h->workers()->active_workers();
5081 
5082   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
5083   dirty_card_queue_set().reset_for_par_iteration();
5084   set_par_threads(n_workers);
5085   workers()->run_task(&redirty_task);
5086   set_par_threads(0);
5087 
5088   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5089   dcq.merge_bufferlists(&dirty_card_queue_set());
5090   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5091 
5092   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
5093 }
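
After the parallel redirtying, the pause-local dirty_card_queue_set() still owns the buffers it iterated; merging them into JavaThread::dirty_card_queue_set() hands the redirtied cards to the normal post-pause machinery. The body of merge_bufferlists() is not shown on this page; a toy model of the assumed splice semantics in standard C++:

#include <cstddef>
#include <deque>

// Toy stand-in for DirtyCardQueueSet's completed-buffer list (illustrative).
struct ToyDCQSet {
  std::deque<void*> _completed;
  std::size_t completed_buffers_num() const { return _completed.size(); }
  void merge_bufferlists(ToyDCQSet* src) {
    // Move everything from src onto this set; src ends up empty, which is
    // what the assert after the call (completed_buffers_num() == 0) checks.
    _completed.insert(_completed.end(),
                      src->_completed.begin(), src->_completed.end());
    src->_completed.clear();
  }
};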


5631     if (g1_policy()->during_initial_mark_pause()) {
5632       ClassLoaderDataGraph::clear_claimed_marks();
5633     }
5634 
5635      // The individual threads will set their evac-failure closures.
5636      if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
5637      // These tasks use SharedHeap::_process_strong_tasks
5638      assert(UseDynamicNumberOfGCThreads ||
5639             workers()->active_workers() == workers()->total_workers(),
5640             "If not dynamic should be using all the workers");
5641     workers()->run_task(&g1_par_task);
5642     end_par_time_sec = os::elapsedTime();
5643 
5644     // Closing the inner scope will execute the destructor
5645     // for the StrongRootsScope object. We record the current
5646     // elapsed time before closing the scope so that time
5647     // taken for the SRS destructor is NOT included in the
5648     // reported parallel time.
5649   }
5650 
5651   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5652 
5653   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5654   phase_times->record_par_time(par_time_ms);
5655 
5656   double code_root_fixup_time_ms =
5657         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5658   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5659 
5660   set_par_threads(0);
5661 
5662   // Process any discovered reference objects - we have
5663   // to do this _before_ we retire the GC alloc regions
5664   // as we may have to copy some 'reachable' referent
5665   // objects (and their reachable sub-graphs) that were
5666   // not copied during the pause.
5667   process_discovered_references(n_workers);
5668 
5669   if (G1StringDedup::is_enabled()) {
5670     double fixup_start = os::elapsedTime();
5671 
5672     G1STWIsAliveClosure is_alive(this);
5673     G1KeepAliveClosure keep_alive(this);
5674     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, phase_times);
5675 
5676     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
5677     phase_times->record_string_dedup_fixup_time(fixup_time_ms);
5678   }
5679 
5680   _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
5681   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5682 
5683   // Reset and re-enable the hot card cache.
5684   // Note the counts for the cards in the regions in the
5685   // collection set are reset when the collection set is freed.
5686   hot_card_cache->reset_hot_cache();
5687   hot_card_cache->set_use_cache(true);
5688 
5689   purge_code_root_memory();
5690 
5691   finalize_for_evac_failure();
5692 
5693   if (evacuation_failed()) {
5694     remove_self_forwarding_pointers();
5695 
5696     // Reset the G1EvacuationFailureALot counters and flags
5697     // Note: the values are reset only when an actual

