< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Print this page
rev 49826 : imported patch 6672778-partial-queue-trimming
rev 49827 : imported patch 6672778-refactoring
rev 49828 : imported patch 6672778-stefanj-review
rev 49831 : imported patch 8201492-properly-implement-non-contiguous-reference-processing
rev 49832 : imported patch 8201640-use-ref-processor-members-directly
rev 49833 : imported patch 8202018-move-card-table-clear
rev 49834 : [mq]: 8202021-cleanup-referenceprocessor
rev 49836 : [mq]: 8202017-reference-processor-remove-enqueue


3844 
3845   virtual void work(uint worker_id) {
3846     _enq_task.work(worker_id);
3847   }
3848 };
3849 
3850 // Driver routine for parallel reference enqueueing.
3851 // Creates an instance of the ref enqueueing gang
3852 // task and has the worker threads execute it.
3853 
// Wraps the ReferenceProcessor's EnqueueTask in a gang-task proxy and runs it
// on the worker gang; each worker invokes _enq_task.work(worker_id) via the
// proxy (see G1STWRefEnqueueTaskProxy::work above).
3854 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
3855   assert(_workers != NULL, "Need parallel worker threads.");
3856 
3857   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
3858 
3859   _workers->run_task(&enq_task_proxy);
3860 }
3861 
3862 // End of weak reference support closures
3863 
3864 // Weak Reference processing during an evacuation pause (part 1).
3865 void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
3866   double ref_proc_start = os::elapsedTime();
3867 
3868   ReferenceProcessor* rp = _ref_processor_stw;
3869   assert(rp->discovery_enabled(), "should have been enabled");
3870 
3871   // Closure to test whether a referent is alive.
3872   G1STWIsAliveClosure is_alive(this);
3873 
3874   // Even when parallel reference processing is enabled, the processing
3875   // of JNI refs is serial and performed serially by the current thread
3876   // rather than by a worker. The following PSS will be used for processing
3877   // JNI refs.
3878 
3879   // Use only a single queue for this PSS.
3880   G1ParScanThreadState*          pss = per_thread_states->state_for_worker(0);
3881   pss->set_ref_processor(NULL);
3882   assert(pss->queue_is_empty(), "pre-condition");
3883 
3884   // Keep alive closure.


3904     uint no_of_gc_workers = workers()->active_workers();
3905 
3906     // Parallel reference processing
3907     assert(no_of_gc_workers <= rp->max_num_queues(),
3908            "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
3909            no_of_gc_workers,  rp->max_num_queues());
3910 
3911     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
3912     stats = rp->process_discovered_references(&is_alive,
3913                                               &keep_alive,
3914                                               &drain_queue,
3915                                               &par_task_executor,
3916                                               pt);
3917   }
3918 
3919   _gc_tracer_stw->report_gc_reference_stats(stats);
3920 
3921   // We have completed copying any necessary live referent objects.
3922   assert(pss->queue_is_empty(), "both queue and overflow should be empty");
3923 




3924   double ref_proc_time = os::elapsedTime() - ref_proc_start;
3925   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
3926 }
3927 
3928 // Weak Reference processing during an evacuation pause (part 2).
// Moves any references remaining on the STW ReferenceProcessor's discovered
// lists onto the pending list, serially or via the worker gang, and records
// the elapsed time (in ms) in the phase times.
3929 void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
3930   double ref_enq_start = os::elapsedTime();
3931 
3932   ReferenceProcessor* rp = _ref_processor_stw;
3933   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
3934 
3935   ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();
3936 
3937   // Now enqueue any remaining on the discovered lists on to
3938   // the pending list.
3939   if (!rp->processing_is_mt()) {
3940     // Serial reference processing...
     // NULL executor => the ReferenceProcessor enqueues on the current thread.
3941     rp->enqueue_discovered_references(NULL, pt);
3942   } else {
3943     // Parallel reference enqueueing
3944 
3945     uint n_workers = workers()->active_workers();
3946 
     // NOTE(review): presumably each worker claims its own discovered-list
     // queue, so more workers than queues would be a setup error — the assert
     // below enforces that invariant; confirm against ReferenceProcessor.
3947     assert(n_workers <= rp->max_num_queues(),
3948            "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
3949            n_workers,  rp->max_num_queues());
3950 
3951     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
3952     rp->enqueue_discovered_references(&par_task_executor, pt);
3953   }
3954 
3955   rp->verify_no_references_recorded();
3956   assert(!rp->discovery_enabled(), "should have been disabled");
3957 
3958   // If during an initial mark pause we install a pending list head which is not otherwise reachable
3959   // ensure that it is marked in the bitmap for concurrent marking to discover.
3960   if (collector_state()->in_initial_mark_gc()) {
3961     oop pll_head = Universe::reference_pending_list();
3962     if (pll_head != NULL) {
3963       // Any valid worker id is fine here as we are in the VM thread and single-threaded.
3964       _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
3965     }
3966   }
3967 
3968   // FIXME
3969   // CM's reference processing also cleans up the string and symbol tables.
3970   // Should we do that here also? We could, but it is a serial operation
3971   // and could significantly increase the pause time.
3972 
     // Convert seconds to milliseconds for the phase-times record.
3973   double ref_enq_time = os::elapsedTime() - ref_enq_start;
3974   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
3975 }
3976 
// Flushes the per-worker scan thread states (G1ParScanThreadStateSet::flush)
// and records the time taken, converted to milliseconds, in the phase times.
3977 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
3978   double merge_pss_time_start = os::elapsedTime();
3979   per_thread_states->flush();
3980   g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
3981 }
3982 
3983 void G1CollectedHeap::pre_evacuate_collection_set() {
3984   _expand_heap_after_alloc_failure = true;
3985   _evacuation_failed = false;
3986 
3987   // Disable the hot card cache.
3988   _hot_card_cache->reset_hot_cache_claimed_index();
3989   _hot_card_cache->set_use_cache(false);
3990 
3991   g1_rem_set()->prepare_for_oops_into_collection_set_do();
3992   _preserved_marks_set.assert_empty();
3993 
3994   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();


4034 
4035   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
4036   phase_times->record_par_time(par_time_ms);
4037 
4038   double code_root_fixup_time_ms =
4039         (os::elapsedTime() - end_par_time_sec) * 1000.0;
4040   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
4041 }
4042 
4043 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4044   // Also cleans the card table from temporary duplicate detection information used
4045   // during UpdateRS/ScanRS.
4046   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
4047 
4048   // Process any discovered reference objects - we have
4049   // to do this _before_ we retire the GC alloc regions
4050   // as we may have to copy some 'reachable' referent
4051   // objects (and their reachable sub-graphs) that were
4052   // not copied during the pause.
4053   process_discovered_references(per_thread_states);
4054   enqueue_discovered_references(per_thread_states);




4055 
4056   G1STWIsAliveClosure is_alive(this);
4057   G1KeepAliveClosure keep_alive(this);
4058 
4059   {
4060     double start = os::elapsedTime();
4061 
4062     WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
4063 
4064     double time_ms = (os::elapsedTime() - start) * 1000.0;
4065     g1_policy()->phase_times()->record_weak_ref_proc_time(time_ms);
4066   }
4067 
4068   if (G1StringDedup::is_enabled()) {
4069     double fixup_start = os::elapsedTime();
4070 
4071     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4072 
4073     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
4074     g1_policy()->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);




3844 
3845   virtual void work(uint worker_id) {
3846     _enq_task.work(worker_id);
3847   }
3848 };
3849 
3850 // Driver routine for parallel reference enqueueing.
3851 // Creates an instance of the ref enqueueing gang
3852 // task and has the worker threads execute it.
3853 
// Driver for parallel enqueueing: builds a gang-task proxy around the given
// EnqueueTask and hands it to the worker gang; each worker then calls
// _enq_task.work(worker_id) through G1STWRefEnqueueTaskProxy::work.
3854 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
3855   assert(_workers != NULL, "Need parallel worker threads.");
3856 
3857   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
3858 
3859   _workers->run_task(&enq_task_proxy);
3860 }
3861 
3862 // End of weak reference support closures
3863 

3864 void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
3865   double ref_proc_start = os::elapsedTime();
3866 
3867   ReferenceProcessor* rp = _ref_processor_stw;
3868   assert(rp->discovery_enabled(), "should have been enabled");
3869 
3870   // Closure to test whether a referent is alive.
3871   G1STWIsAliveClosure is_alive(this);
3872 
3873   // Even when parallel reference processing is enabled, the processing
3874   // of JNI refs is serial and performed serially by the current thread
3875   // rather than by a worker. The following PSS will be used for processing
3876   // JNI refs.
3877 
3878   // Use only a single queue for this PSS.
3879   G1ParScanThreadState*          pss = per_thread_states->state_for_worker(0);
3880   pss->set_ref_processor(NULL);
3881   assert(pss->queue_is_empty(), "pre-condition");
3882 
3883   // Keep alive closure.


3903     uint no_of_gc_workers = workers()->active_workers();
3904 
3905     // Parallel reference processing
3906     assert(no_of_gc_workers <= rp->max_num_queues(),
3907            "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
3908            no_of_gc_workers,  rp->max_num_queues());
3909 
3910     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
3911     stats = rp->process_discovered_references(&is_alive,
3912                                               &keep_alive,
3913                                               &drain_queue,
3914                                               &par_task_executor,
3915                                               pt);
3916   }
3917 
3918   _gc_tracer_stw->report_gc_reference_stats(stats);
3919 
3920   // We have completed copying any necessary live referent objects.
3921   assert(pss->queue_is_empty(), "both queue and overflow should be empty");
3922 
3923   make_pending_list_reachable();
3924   
3925   rp->verify_no_references_recorded();
3926 
3927   double ref_proc_time = os::elapsedTime() - ref_proc_start;
3928   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
3929 }
3930 
// During an initial mark pause, a newly installed reference pending list head
// may not be reachable from anywhere else; mark it in the next bitmap so that
// concurrent marking discovers (and keeps) it. No-op outside initial mark.
3931 void G1CollectedHeap::make_pending_list_reachable() {






























3932   if (collector_state()->in_initial_mark_gc()) {
3933     oop pll_head = Universe::reference_pending_list();
3934     if (pll_head != NULL) {
3935       // Any valid worker id is fine here as we are in the VM thread and single-threaded.
3936       _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
3937     }
3938   }








3939 }
3940 
// Flushes each worker's G1ParScanThreadState into shared state and records the
// merge time (milliseconds) in the phase times for this pause.
3941 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
3942   double merge_pss_time_start = os::elapsedTime();
3943   per_thread_states->flush();
3944   g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
3945 }
3946 
3947 void G1CollectedHeap::pre_evacuate_collection_set() {
3948   _expand_heap_after_alloc_failure = true;
3949   _evacuation_failed = false;
3950 
3951   // Disable the hot card cache.
3952   _hot_card_cache->reset_hot_cache_claimed_index();
3953   _hot_card_cache->set_use_cache(false);
3954 
3955   g1_rem_set()->prepare_for_oops_into_collection_set_do();
3956   _preserved_marks_set.assert_empty();
3957 
3958   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();


3998 
3999   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
4000   phase_times->record_par_time(par_time_ms);
4001 
4002   double code_root_fixup_time_ms =
4003         (os::elapsedTime() - end_par_time_sec) * 1000.0;
4004   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
4005 }
4006 
4007 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4008   // Also cleans the card table from temporary duplicate detection information used
4009   // during UpdateRS/ScanRS.
4010   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
4011 
4012   // Process any discovered reference objects - we have
4013   // to do this _before_ we retire the GC alloc regions
4014   // as we may have to copy some 'reachable' referent
4015   // objects (and their reachable sub-graphs) that were
4016   // not copied during the pause.
4017   process_discovered_references(per_thread_states);
4018 
4019   // FIXME
4020   // CM's reference processing also cleans up the string and symbol tables.
4021   // Should we do that here also? We could, but it is a serial operation
4022   // and could significantly increase the pause time.
4023 
4024   G1STWIsAliveClosure is_alive(this);
4025   G1KeepAliveClosure keep_alive(this);
4026 
4027   {
4028     double start = os::elapsedTime();
4029 
4030     WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
4031 
4032     double time_ms = (os::elapsedTime() - start) * 1000.0;
4033     g1_policy()->phase_times()->record_weak_ref_proc_time(time_ms);
4034   }
4035 
4036   if (G1StringDedup::is_enabled()) {
4037     double fixup_start = os::elapsedTime();
4038 
4039     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4040 
4041     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
4042     g1_policy()->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);


< prev index next >