src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp


Old version:

2281 
2282   bool failures() { return _failures; }
2283 };
2284 
2285 void G1CollectedHeap::check_gc_time_stamps() {
2286   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2287   heap_region_iterate(&cl);
2288   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2289 }
2290 #endif // PRODUCT
2291 
2292 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2293                                                  DirtyCardQueue* into_cset_dcq,
2294                                                  bool concurrent,
2295                                                  uint worker_i) {
2296   // Clean cards in the hot card cache
2297   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
2298   hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
2299 
2300   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2301   int n_completed_buffers = 0;
2302   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2303     n_completed_buffers++;
2304   }
2305   g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
2306   dcqs.clear_n_completed_buffers();
2307   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2308 }
2309 
2310 
2311 // Computes the sum of the storage used by the various regions.
2312 size_t G1CollectedHeap::used() const {
2313   return _allocator->used();
2314 }
2315 
2316 size_t G1CollectedHeap::used_unlocked() const {
2317   return _allocator->used_unlocked();
2318 }
2319 
2320 class SumUsedClosure: public HeapRegionClosure {
2321   size_t _used;
2322 public:
2323   SumUsedClosure() : _used(0) {}
2324   bool doHeapRegion(HeapRegion* r) {
2325     if (!r->continuesHumongous()) {


3883   // Record whether this pause is an initial mark. By the time the
3884   // current thread has completed its logging output and it's safe to
3885   // signal the CM thread, the flag's value in the policy will have been reset.
3886   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3887 
3888   // Inner scope for scope based logging, timers, and stats collection
3889   {
3890     EvacuationInfo evacuation_info;
3891 
3892     if (g1_policy()->during_initial_mark_pause()) {
3893       // We are about to start a marking cycle, so we increment the
3894       // old marking cycles started counter.
3895       increment_old_marking_cycles_started();
3896       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3897     }
3898 
3899     _gc_tracer_stw->report_yc_type(yc_type());
3900 
3901     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3902 
3903     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3904                                 workers()->active_workers() : 1);
3905     double pause_start_sec = os::elapsedTime();
3906     g1_policy()->phase_times()->note_gc_start(active_workers);
3907     log_gc_header();
3908 
3909     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3910     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3911 
3912     // If the secondary_free_list is not empty, append it to the
3913     // free_list. No need to wait for the cleanup operation to finish;
3914     // the region allocation code will check the secondary_free_list
3915     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3916     // set, skip this step so that the region allocation code has to
3917     // get entries from the secondary_free_list.
3918     if (!G1StressConcRegionFreeing) {
3919       append_secondary_free_list_if_not_empty_with_lock();
3920     }
3921 
3922     assert(check_young_list_well_formed(), "young list should be well formed");
3923     assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3924            "sanity check");
3925 
3926     // Don't dynamically change the number of GC threads this early.  A value of


4686    public:
4687     G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
4688                  bool only_young, bool claim)
4689         : _oop_closure(oop_closure),
4690           _oop_in_klass_closure(oop_closure->g1(),
4691                                 oop_closure->pss(),
4692                                 oop_closure->rp()),
4693           _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
4694           _claim(claim) {
4695 
4696     }
4697 
4698     void do_cld(ClassLoaderData* cld) {
4699       cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
4700     }
4701   };
4702 
4703   void work(uint worker_id) {
4704     if (worker_id >= _n_workers) return;  // no work needed this round
4705 
4706     double start_time_ms = os::elapsedTime() * 1000.0;
4707     _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
4708 
4709     {
4710       ResourceMark rm;
4711       HandleMark   hm;
4712 
4713       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
4714 
4715       G1ParScanThreadState            pss(_g1h, worker_id, rp);
4716       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4717 
4718       pss.set_evac_failure_closure(&evac_failure_cl);
4719 
4720       bool only_young = _g1h->g1_policy()->gcs_are_young();
4721 
4722       // Non-IM young GC.
4723       G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
4724       G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
4725                                                                                only_young, // Only process dirty klasses.
4726                                                                                false);     // No need to claim CLDs.
4727       // IM young GC.


4767       }
4768 
4769 
4770       G1ParPushHeapRSClosure  push_heap_rs_cl(_g1h, &pss);
4771 
4772       pss.start_strong_roots();
4773       _g1h->g1_process_roots(strong_root_cl,
4774                              weak_root_cl,
4775                              &push_heap_rs_cl,
4776                              strong_cld_cl,
4777                              weak_cld_cl,
4778                              strong_code_cl,
4779                              worker_id);
4780 
4781       pss.end_strong_roots();
4782 
4783       {
4784         double start = os::elapsedTime();
4785         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4786         evac.do_void();
4787         double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4788         double term_ms = pss.term_time()*1000.0;
4789         _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
4790         _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());

4791       }
4792       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4793       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4794 
4795       if (ParallelGCVerbose) {
4796         MutexLocker x(stats_lock());
4797         pss.print_termination_stats(worker_id);
4798       }
4799 
4800       assert(pss.queue_is_empty(), "should be empty");
4801 
4802       // Close the inner scope so that the ResourceMark and HandleMark
4803       // destructors are executed here and are included as part of the
4804       // "GC Worker Time".
4805     }
4806 
4807     double end_time_ms = os::elapsedTime() * 1000.0;
4808     _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4809   }
4810 };
4811 
4812 // *** Common G1 Evacuation Stuff
4813 
4814 // This method is run in a GC worker.
4815 
4816 void
4817 G1CollectedHeap::
4818 g1_process_roots(OopClosure* scan_non_heap_roots,
4819                  OopClosure* scan_non_heap_weak_roots,
4820                  OopsInHeapRegionClosure* scan_rs,
4821                  CLDClosure* scan_strong_clds,
4822                  CLDClosure* scan_weak_clds,
4823                  CodeBlobClosure* scan_strong_code,
4824                  uint worker_i) {
4825 
4826   // First scan the shared roots.
4827   double ext_roots_start = os::elapsedTime();
4828   double closure_app_time_sec = 0.0;


4850     // until they can be processed at the end of marking.
4851     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
4852   }
4853 
4854   if (trace_metadata) {
4855     // Barrier to make sure all workers have passed
4856     // the strong CLD and strong nmethods phases.
4857     active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
4858 
4859     // Now take the complement of the strong CLDs.
4860     ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
4861   }
4862 
4863   // Finish up any enqueued closure apps (attributed as object copy time).
4864   buf_scan_non_heap_roots.done();
4865   buf_scan_non_heap_weak_roots.done();
4866 
4867   double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
4868       + buf_scan_non_heap_weak_roots.closure_app_seconds();
4869 
4870   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
4871 
4872   double ext_root_time_ms =
4873     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4874 
4875   g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);

4876 
4877   // During conc marking we have to filter the per-thread SATB buffers
4878   // to make sure we remove any oops into the CSet (which will show up
4879   // as implicitly live).
4880   double satb_filtering_ms = 0.0;
4881   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
4882     if (mark_in_progress()) {
4883       double satb_filter_start = os::elapsedTime();
4884 
4885       JavaThread::satb_mark_queue_set().filter_thread_buffers();
4886 
4887       satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
4888     }
4889   }
4890   g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
4891 
4892   // Now scan the complement of the collection set.
4893   G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
4894 
4895   g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4896 
4897   _process_strong_tasks->all_tasks_completed();
4898 }
4899 
4900 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4901 private:
4902   BoolObjectClosure* _is_alive;
4903   int _initial_string_table_size;
4904   int _initial_symbol_table_size;
4905 
4906   bool  _process_strings;
4907   int _strings_processed;
4908   int _strings_removed;
4909 
4910   bool  _process_symbols;


5293       set_par_threads(n_workers);
5294       workers()->run_task(&g1_unlink_task);
5295       set_par_threads(0);
5296     } else {
5297       g1_unlink_task.work(0);
5298     }
5299   }
5300 
5301   if (G1StringDedup::is_enabled()) {
5302     G1StringDedup::unlink(is_alive);
5303   }
5304 }
5305 
5306 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
5307  private:
5308   DirtyCardQueueSet* _queue;
5309  public:
5310   G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
5311 
5312   virtual void work(uint worker_id) {
5313     double start_time = os::elapsedTime();

5314 
5315     RedirtyLoggedCardTableEntryClosure cl;
5316     if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
5317       _queue->par_apply_closure_to_all_completed_buffers(&cl);
5318     } else {
5319       _queue->apply_closure_to_all_completed_buffers(&cl);
5320     }
5321 
5322     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
5323     timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
5324     timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
5325   }
5326 };
5327 
5328 void G1CollectedHeap::redirty_logged_cards() {
5329   double redirty_logged_cards_start = os::elapsedTime();
5330 
5331   uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5332                    _g1h->workers()->active_workers() : 1);
5333 
5334   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
5335   dirty_card_queue_set().reset_for_par_iteration();
5336   if (use_parallel_gc_threads()) {
5337     set_par_threads(n_workers);
5338     workers()->run_task(&redirty_task);
5339     set_par_threads(0);
5340   } else {
5341     redirty_task.work(0);
5342   }
5343 
5344   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();


5906       // The individual threads will set their evac-failure closures.
5907       if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5908       // These tasks use SharedHeap::_process_strong_tasks
5909       assert(UseDynamicNumberOfGCThreads ||
5910              workers()->active_workers() == workers()->total_workers(),
5911              "If not dynamic should be using all the  workers");
5912       workers()->run_task(&g1_par_task);
5913     } else {
5914       g1_par_task.set_for_termination(n_workers);
5915       g1_par_task.work(0);
5916     }
5917     end_par_time_sec = os::elapsedTime();
5918 
5919     // Closing the inner scope will execute the destructor
5920     // for the StrongRootsScope object. We record the current
5921     // elapsed time before closing the scope so that time
5922     // taken for the SRS destructor is NOT included in the
5923     // reported parallel time.
5924   }
5925 


5926   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5927   g1_policy()->phase_times()->record_par_time(par_time_ms);
5928 
5929   double code_root_fixup_time_ms =
5930         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5931   g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
5932 
5933   set_par_threads(0);
5934 
5935   // Process any discovered reference objects - we have
5936   // to do this _before_ we retire the GC alloc regions
5937   // as we may have to copy some 'reachable' referent
5938   // objects (and their reachable sub-graphs) that were
5939   // not copied during the pause.
5940   process_discovered_references(n_workers);
5941 
5942   if (G1StringDedup::is_enabled()) {


5943     G1STWIsAliveClosure is_alive(this);
5944     G1KeepAliveClosure keep_alive(this);
5945     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);



5946   }
5947 
5948   _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
5949   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5950 
5951   // Reset and re-enable the hot card cache.
5952   // Note the counts for the cards in the regions in the
5953   // collection set are reset when the collection set is freed.
5954   hot_card_cache->reset_hot_cache();
5955   hot_card_cache->set_use_cache(true);
5956 
5957   purge_code_root_memory();
5958 
5959   if (g1_policy()->during_initial_mark_pause()) {
5960     // Reset the claim values set during marking the strong code roots
5961     reset_heap_region_claim_values();
5962   }
5963 
5964   finalize_for_evac_failure();
5965 

New version:

2281 
2282   bool failures() { return _failures; }
2283 };
2284 
2285 void G1CollectedHeap::check_gc_time_stamps() {
2286   CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2287   heap_region_iterate(&cl);
2288   guarantee(!cl.failures(), "all GC time stamps should have been reset");
2289 }
2290 #endif // PRODUCT
2291 
2292 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2293                                                  DirtyCardQueue* into_cset_dcq,
2294                                                  bool concurrent,
2295                                                  uint worker_i) {
2296   // Clean cards in the hot card cache
2297   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
2298   hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
2299 
2300   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2301   size_t n_completed_buffers = 0;
2302   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2303     n_completed_buffers++;
2304   }
2305   g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
2306   dcqs.clear_n_completed_buffers();
2307   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2308 }
2309 
2310 
2311 // Computes the sum of the storage used by the various regions.
2312 size_t G1CollectedHeap::used() const {
2313   return _allocator->used();
2314 }
2315 
2316 size_t G1CollectedHeap::used_unlocked() const {
2317   return _allocator->used_unlocked();
2318 }
2319 
2320 class SumUsedClosure: public HeapRegionClosure {
2321   size_t _used;
2322 public:
2323   SumUsedClosure() : _used(0) {}
2324   bool doHeapRegion(HeapRegion* r) {
2325     if (!r->continuesHumongous()) {
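
Note on the hunk above: the per-worker completed-buffer count is widened from int to size_t, and the dedicated record_update_rs_processed_buffers() call is replaced by the generic record_thread_work_item(G1GCPhaseTimes::UpdateRS, ...). The G1GCPhaseTimes internals are not part of this webrev, so the following is only a minimal sketch, assuming an enum-indexed per-worker table; every name except record_thread_work_item is illustrative.

    // Minimal sketch (NOT the shipped G1GCPhaseTimes): one generic
    // per-phase, per-worker table replaces a family of phase-specific
    // record_*() methods.
    #include <cstddef>
    #include <vector>

    class PhaseWorkItems {
     public:
      enum Phase { UpdateRS, Termination, RedirtyCards, PhaseCount };

      explicit PhaseWorkItems(unsigned max_workers)
          : _items(PhaseCount, std::vector<size_t>(max_workers, 0)) {}

      // e.g. record_thread_work_item(UpdateRS, worker_i, n_completed_buffers)
      void record_thread_work_item(Phase phase, unsigned worker_i, size_t count) {
        _items[phase][worker_i] = count;
      }

      // Summed over workers when the pause is reported.
      size_t sum(Phase phase) const {
        size_t total = 0;
        for (size_t i = 0; i < _items[phase].size(); i++) {
          total += _items[phase][i];
        }
        return total;
      }

     private:
      std::vector<std::vector<size_t> > _items; // [phase][worker]
    };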


3883   // Record whether this pause is an initial mark. By the time the
3884   // current thread has completed its logging output and it's safe to
3885   // signal the CM thread, the flag's value in the policy will have been reset.
3886   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3887 
3888   // Inner scope for scope based logging, timers, and stats collection
3889   {
3890     EvacuationInfo evacuation_info;
3891 
3892     if (g1_policy()->during_initial_mark_pause()) {
3893       // We are about to start a marking cycle, so we increment the
3894       // old marking cycles started counter.
3895       increment_old_marking_cycles_started();
3896       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3897     }
3898 
3899     _gc_tracer_stw->report_yc_type(yc_type());
3900 
3901     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3902 
3903     uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
3904                                 workers()->active_workers() : 1);
3905     double pause_start_sec = os::elapsedTime();
3906     g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());
3907     log_gc_header();
3908 
3909     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3910     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3911 
3912     // If the secondary_free_list is not empty, append it to the
3913     // free_list. No need to wait for the cleanup operation to finish;
3914     // the region allocation code will check the secondary_free_list
3915     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3916     // set, skip this step so that the region allocation code has to
3917     // get entries from the secondary_free_list.
3918     if (!G1StressConcRegionFreeing) {
3919       append_secondary_free_list_if_not_empty_with_lock();
3920     }
3921 
3922     assert(check_young_list_well_formed(), "young list should be well formed");
3923     assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3924            "sanity check");
3925 
3926     // Don't dynamically change the number of GC threads this early.  A value of


4686    public:
4687     G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
4688                  bool only_young, bool claim)
4689         : _oop_closure(oop_closure),
4690           _oop_in_klass_closure(oop_closure->g1(),
4691                                 oop_closure->pss(),
4692                                 oop_closure->rp()),
4693           _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
4694           _claim(claim) {
4695 
4696     }
4697 
4698     void do_cld(ClassLoaderData* cld) {
4699       cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
4700     }
4701   };
4702 
4703   void work(uint worker_id) {
4704     if (worker_id >= _n_workers) return;  // no work needed this round
4705 
4706     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, os::elapsedTime());

4707 
4708     {
4709       ResourceMark rm;
4710       HandleMark   hm;
4711 
4712       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
4713 
4714       G1ParScanThreadState            pss(_g1h, worker_id, rp);
4715       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4716 
4717       pss.set_evac_failure_closure(&evac_failure_cl);
4718 
4719       bool only_young = _g1h->g1_policy()->gcs_are_young();
4720 
4721       // Non-IM young GC.
4722       G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
4723       G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
4724                                                                                only_young, // Only process dirty klasses.
4725                                                                                false);     // No need to claim CLDs.
4726       // IM young GC.


4766       }
4767 
4768 
4769       G1ParPushHeapRSClosure  push_heap_rs_cl(_g1h, &pss);
4770 
4771       pss.start_strong_roots();
4772       _g1h->g1_process_roots(strong_root_cl,
4773                              weak_root_cl,
4774                              &push_heap_rs_cl,
4775                              strong_cld_cl,
4776                              weak_cld_cl,
4777                              strong_code_cl,
4778                              worker_id);
4779 
4780       pss.end_strong_roots();
4781 
4782       {
4783         double start = os::elapsedTime();
4784         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4785         evac.do_void();
4786         double elapsed_sec = os::elapsedTime() - start;
4787         double term_sec = pss.term_time();
4788         _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4789         _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4790         _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, pss.term_attempts());
4791       }
4792       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4793       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4794 
4795       if (ParallelGCVerbose) {
4796         MutexLocker x(stats_lock());
4797         pss.print_termination_stats(worker_id);
4798       }
4799 
4800       assert(pss.queue_is_empty(), "should be empty");
4801 
4802       // Close the inner scope so that the ResourceMark and HandleMark
4803       // destructors are executed here and are included as part of the
4804       // "GC Worker Time".
4805     }
4806     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());


4807   }
4808 };
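
Note on the work() changes above: the worker's start and end are now recorded as raw timestamps in seconds under G1GCPhaseTimes::GCWorkerStart and GCWorkerEnd, object copy and termination times go through the same enum-keyed interface (add_time_secs/record_time_secs), and the termination-attempt count becomes a Termination work item. A hedged sketch of such a seconds-based table (illustrative, not the shipped G1GCPhaseTimes):

    #include <vector>

    // Raw seconds are stored per (phase, worker); converting to ms can be
    // deferred to reporting instead of being repeated at every call site.
    class WorkerPhaseSeconds {
     public:
      enum Phase { GCWorkerStart, ObjCopy, Termination, GCWorkerEnd, PhaseCount };

      explicit WorkerPhaseSeconds(unsigned max_workers)
          : _secs(PhaseCount, std::vector<double>(max_workers, 0.0)) {}

      void record_time_secs(Phase p, unsigned w, double s) { _secs[p][w] = s; }
      void add_time_secs(Phase p, unsigned w, double s)    { _secs[p][w] += s; }

      // GCWorkerStart/GCWorkerEnd hold timestamps, so a worker's total
      // time can be derived rather than measured separately.
      double worker_time_sec(unsigned w) const {
        return _secs[GCWorkerEnd][w] - _secs[GCWorkerStart][w];
      }

     private:
      std::vector<std::vector<double> > _secs; // [phase][worker]
    };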
4809 
4810 // *** Common G1 Evacuation Stuff
4811 
4812 // This method is run in a GC worker.
4813 
4814 void
4815 G1CollectedHeap::
4816 g1_process_roots(OopClosure* scan_non_heap_roots,
4817                  OopClosure* scan_non_heap_weak_roots,
4818                  OopsInHeapRegionClosure* scan_rs,
4819                  CLDClosure* scan_strong_clds,
4820                  CLDClosure* scan_weak_clds,
4821                  CodeBlobClosure* scan_strong_code,
4822                  uint worker_i) {
4823 
4824   // First scan the shared roots.
4825   double ext_roots_start = os::elapsedTime();
4826   double closure_app_time_sec = 0.0;


4848     // until they can be processed at the end of marking.
4849     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
4850   }
4851 
4852   if (trace_metadata) {
4853     // Barrier to make sure all workers have passed
4854     // the strong CLD and strong nmethods phases.
4855     active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
4856 
4857     // Now take the complement of the strong CLDs.
4858     ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
4859   }
4860 
4861   // Finish up any enqueued closure apps (attributed as object copy time).
4862   buf_scan_non_heap_roots.done();
4863   buf_scan_non_heap_weak_roots.done();
4864 
4865   double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
4866       + buf_scan_non_heap_weak_roots.closure_app_seconds();
4867 
4868   g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, obj_copy_time_sec);



4869 
4870   double ext_root_time_sec = os::elapsedTime() - ext_roots_start - obj_copy_time_sec;
4871   g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::ExtRootScan, worker_i, ext_root_time_sec);
4872 
4873   // During conc marking we have to filter the per-thread SATB buffers
4874   // to make sure we remove any oops into the CSet (which will show up
4875   // as implicitly live).
4876   {
4877     G1GCParPhaseTimesTracker x(g1_policy()->phase_times(), G1GCPhaseTimes::SATBFiltering, worker_i);
4878     if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers) && mark_in_progress()) {


4879       JavaThread::satb_mark_queue_set().filter_thread_buffers();


4880     }
4881   }

4882 
4883   // Now scan the complement of the collection set.
4884   G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
4885 
4886   g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4887 
4888   _process_strong_tasks->all_tasks_completed();
4889 }
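
Note on the SATB-filtering hunk above: the manual satb_filter_start/record_satb_filtering_time bracketing is replaced by a G1GCParPhaseTimesTracker declared at the top of the scope, so the phase is timed on every path out of the block, including the early-out cases (task already claimed, or marking not in progress). The tracker's implementation is not shown in this webrev; below is a minimal RAII sketch of the idiom, with printing standing in for the real recording.

    #include <chrono>
    #include <cstdio>

    // Constructor stamps the start time; destructor records the elapsed
    // time, so every exit path out of the enclosing scope is timed.
    class ScopedPhaseTimer {
     public:
      ScopedPhaseTimer(const char* phase_name, unsigned worker_id)
          : _name(phase_name), _worker(worker_id),
            _start(std::chrono::steady_clock::now()) {}

      ~ScopedPhaseTimer() {
        std::chrono::duration<double> d =
            std::chrono::steady_clock::now() - _start;
        // The real tracker records into G1GCPhaseTimes; printf stands in.
        std::printf("%s, worker %u: %.3f ms\n", _name, _worker,
                    d.count() * 1000.0);
      }

     private:
      const char* _name;
      unsigned _worker;
      std::chrono::steady_clock::time_point _start;
    };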
4890 
4891 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4892 private:
4893   BoolObjectClosure* _is_alive;
4894   int _initial_string_table_size;
4895   int _initial_symbol_table_size;
4896 
4897   bool  _process_strings;
4898   int _strings_processed;
4899   int _strings_removed;
4900 
4901   bool  _process_symbols;


5284       set_par_threads(n_workers);
5285       workers()->run_task(&g1_unlink_task);
5286       set_par_threads(0);
5287     } else {
5288       g1_unlink_task.work(0);
5289     }
5290   }
5291 
5292   if (G1StringDedup::is_enabled()) {
5293     G1StringDedup::unlink(is_alive);
5294   }
5295 }
5296 
5297 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
5298  private:
5299   DirtyCardQueueSet* _queue;
5300  public:
5301   G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
5302 
5303   virtual void work(uint worker_id) {
5304     G1GCPhaseTimes* phase_times = G1CollectedHeap::heap()->g1_policy()->phase_times();
5305     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
5306 
5307     RedirtyLoggedCardTableEntryClosure cl;
5308     if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
5309       _queue->par_apply_closure_to_all_completed_buffers(&cl);
5310     } else {
5311       _queue->apply_closure_to_all_completed_buffers(&cl);
5312     }
5313 
5314     phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_processed());


5315   }
5316 };
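
Note on G1RedirtyLoggedCardsTask::work() above: the explicit start_time/record_redirty_logged_cards_time_ms pair is likewise replaced by a tracker scope, and the processed-card count is reported through record_thread_work_item(G1GCPhaseTimes::RedirtyCards, ...). A usage sketch, reusing the hypothetical ScopedPhaseTimer from the previous note:

    // Mirrors the shape of the new work() above; names are illustrative.
    void work_sketch(unsigned worker_id) {
      ScopedPhaseTimer t("RedirtyCards", worker_id); // times the whole body
      // ... apply the redirtying closure to all completed buffers ...
    } // destructor fires here and records the elapsed time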
5317 
5318 void G1CollectedHeap::redirty_logged_cards() {
5319   double redirty_logged_cards_start = os::elapsedTime();
5320 
5321   uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5322                    _g1h->workers()->active_workers() : 1);
5323 
5324   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set());
5325   dirty_card_queue_set().reset_for_par_iteration();
5326   if (use_parallel_gc_threads()) {
5327     set_par_threads(n_workers);
5328     workers()->run_task(&redirty_task);
5329     set_par_threads(0);
5330   } else {
5331     redirty_task.work(0);
5332   }
5333 
5334   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();


5896       // The individual threads will set their evac-failure closures.
5897       if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5898       // These tasks use SharedHeap::_process_strong_tasks
5899       assert(UseDynamicNumberOfGCThreads ||
5900              workers()->active_workers() == workers()->total_workers(),
5901              "If not dynamic should be using all the  workers");
5902       workers()->run_task(&g1_par_task);
5903     } else {
5904       g1_par_task.set_for_termination(n_workers);
5905       g1_par_task.work(0);
5906     }
5907     end_par_time_sec = os::elapsedTime();
5908 
5909     // Closing the inner scope will execute the destructor
5910     // for the StrongRootsScope object. We record the current
5911     // elapsed time before closing the scope so that time
5912     // taken for the SRS destructor is NOT included in the
5913     // reported parallel time.
5914   }
5915 
5916   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5917 
5918   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5919   phase_times->record_par_time(par_time_ms);
5920 
5921   double code_root_fixup_time_ms =
5922         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5923   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5924 
5925   set_par_threads(0);
5926 
5927   // Process any discovered reference objects - we have
5928   // to do this _before_ we retire the GC alloc regions
5929   // as we may have to copy some 'reachable' referent
5930   // objects (and their reachable sub-graphs) that were
5931   // not copied during the pause.
5932   process_discovered_references(n_workers);
5933 
5934   if (G1StringDedup::is_enabled()) {
5935     double fixup_start = os::elapsedTime();
5936 
5937     G1STWIsAliveClosure is_alive(this);
5938     G1KeepAliveClosure keep_alive(this);
5939     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, phase_times);
5940 
5941     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
5942     phase_times->record_string_dedup_fixup_time(fixup_time_ms);
5943   }
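
Note on the string-dedup hunk above: the fixup work is now bracketed with os::elapsedTime() and reported via record_string_dedup_fixup_time(), and unlink_or_oops_do() additionally receives phase_times so sub-phases can be attributed per worker. The bracketing idiom, factored into a helper as a sketch (os_elapsed_seconds() and timed_ms() are hypothetical stand-ins, not HotSpot APIs):

    #include <chrono>

    // Stand-in for HotSpot's os::elapsedTime(), which returns seconds.
    static double os_elapsed_seconds() {
      using namespace std::chrono;
      return duration<double>(steady_clock::now().time_since_epoch()).count();
    }

    // Runs fn and returns its wall-clock duration in milliseconds.
    template <typename Fn>
    static double timed_ms(Fn fn) {
      double start = os_elapsed_seconds();
      fn();
      return (os_elapsed_seconds() - start) * 1000.0;
    }

    // e.g. phase_times->record_string_dedup_fixup_time(
    //          timed_ms([&] { G1StringDedup::unlink_or_oops_do(...); }));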
5944 
5945   _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
5946   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5947 
5948   // Reset and re-enable the hot card cache.
5949   // Note the counts for the cards in the regions in the
5950   // collection set are reset when the collection set is freed.
5951   hot_card_cache->reset_hot_cache();
5952   hot_card_cache->set_use_cache(true);
5953 
5954   purge_code_root_memory();
5955 
5956   if (g1_policy()->during_initial_mark_pause()) {
5957     // Reset the claim values set during marking the strong code roots
5958     reset_heap_region_claim_values();
5959   }
5960 
5961   finalize_for_evac_failure();
5962 

