4823 true); // Need to claim CLDs.
4824 // Weak roots closures.
4825 G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
4826 G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
4827 false, // Process all klasses.
4828 true); // Need to claim CLDs.
4829
4830 G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
4831 G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
4832 // IM Weak code roots are handled later.
4833
4834 OopClosure* strong_root_cl;
4835 OopClosure* weak_root_cl;
4836 CLDClosure* strong_cld_cl;
4837 CLDClosure* weak_cld_cl;
4838 CodeBlobClosure* strong_code_cl;
4839
4840 if (_g1h->g1_policy()->during_initial_mark_pause()) {
4841 // We also need to mark copied objects.
4842 strong_root_cl = &scan_mark_root_cl;
4843 weak_root_cl = &scan_mark_weak_root_cl;
4844 strong_cld_cl = &scan_mark_cld_cl;
4845 weak_cld_cl = &scan_mark_weak_cld_cl;
4846 strong_code_cl = &scan_mark_code_cl;
4847 } else {
4848 strong_root_cl = &scan_only_root_cl;
4849 weak_root_cl = &scan_only_root_cl;
4850 strong_cld_cl = &scan_only_cld_cl;
4851 weak_cld_cl = &scan_only_cld_cl;
4852 strong_code_cl = &scan_only_code_cl;
4853 }
4854
4855
4856 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
4857
4858 pss.start_strong_roots();
4859 _g1h->g1_process_roots(strong_root_cl,
4860 weak_root_cl,
4861 &push_heap_rs_cl,
4862 strong_cld_cl,
4863 weak_cld_cl,
4864 strong_code_cl,
4865 worker_id);
4866
4897
4898 // *** Common G1 Evacuation Stuff
4899
4900 // This method is run in a GC worker.
4901
4902 void
4903 G1CollectedHeap::
4904 g1_process_roots(OopClosure* scan_non_heap_roots,
4905 OopClosure* scan_non_heap_weak_roots,
4906 OopsInHeapRegionClosure* scan_rs,
4907 CLDClosure* scan_strong_clds,
4908 CLDClosure* scan_weak_clds,
4909 CodeBlobClosure* scan_strong_code,
4910 uint worker_i) {
4911
4912 // First scan the shared roots.
4913 double ext_roots_start = os::elapsedTime();
4914 double closure_app_time_sec = 0.0;
4915
4916 bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
4917
4918 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4919 BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
4920
4921 process_roots(false, // no scoping; this is parallel code
4922 SharedHeap::SO_None,
4923 &buf_scan_non_heap_roots,
4924 &buf_scan_non_heap_weak_roots,
4925 scan_strong_clds,
4926 // Initial Mark handles the weak CLDs separately.
4927 (during_im ? NULL : scan_weak_clds),
4928 scan_strong_code);
4929
4930 // Now the CM ref_processor roots.
4931 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4932 // We need to treat the discovered reference lists of the
4933 // concurrent mark ref processor as roots and keep entries
4934 // (which are added by the marking threads) on them live
4935 // until they can be processed at the end of marking.
4936 ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
4937 }
4938
4939 if (during_im) {
4940 // Barrier to make sure all workers passed
4941 // the strong CLD and strong nmethods phases.
4942 active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
4943
4944 // Now take the complement of the strong CLDs.
4945 ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
4946 }
4947
4948 // Finish up any enqueued closure apps (attributed as object copy time).
4949 buf_scan_non_heap_roots.done();
4950 buf_scan_non_heap_weak_roots.done();
4951
4952 double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
4953 + buf_scan_non_heap_weak_roots.closure_app_seconds();
4954
4955 g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
4956
4957 double ext_root_time_ms =
4958 ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4959
5936 set_par_threads(n_workers);
5937 } else {
5938 assert(n_par_threads() == 0,
5939 "Should be the original non-parallel value");
5940 n_workers = 1;
5941 }
5942
5943 G1ParTask g1_par_task(this, _task_queues);
5944
5945 init_for_evac_failure(NULL);
5946
5947 rem_set()->prepare_for_younger_refs_iterate(true);
5948
5949 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5950 double start_par_time_sec = os::elapsedTime();
5951 double end_par_time_sec;
5952
5953 {
5954 StrongRootsScope srs(this);
5955 // InitialMark needs claim bits to keep track of the marked-through CLDs.
5956 if (g1_policy()->during_initial_mark_pause()) {
5957 ClassLoaderDataGraph::clear_claimed_marks();
5958 }
5959
5960 if (G1CollectedHeap::use_parallel_gc_threads()) {
5961 // The individual threads will set their evac-failure closures.
5962 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5963 // These tasks use SharedHeap::_process_strong_tasks
5964 assert(UseDynamicNumberOfGCThreads ||
5965 workers()->active_workers() == workers()->total_workers(),
5966 "If not dynamic should be using all the workers");
5967 workers()->run_task(&g1_par_task);
5968 } else {
5969 g1_par_task.set_for_termination(n_workers);
5970 g1_par_task.work(0);
5971 }
5972 end_par_time_sec = os::elapsedTime();
5973
5974 // Closing the inner scope will execute the destructor
5975 // for the StrongRootsScope object. We record the current
|
4823 true); // Need to claim CLDs.
4824 // Weak roots closures.
4825 G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
4826 G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
4827 false, // Process all klasses.
4828 true); // Need to claim CLDs.
4829
4830 G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
4831 G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
4832 // IM Weak code roots are handled later.
4833
4834 OopClosure* strong_root_cl;
4835 OopClosure* weak_root_cl;
4836 CLDClosure* strong_cld_cl;
4837 CLDClosure* weak_cld_cl;
4838 CodeBlobClosure* strong_code_cl;
4839
4840 if (_g1h->g1_policy()->during_initial_mark_pause()) {
4841 // We also need to mark copied objects.
4842 strong_root_cl = &scan_mark_root_cl;
4843 strong_cld_cl = &scan_mark_cld_cl;
4844 strong_code_cl = &scan_mark_code_cl;
4845 if (G1ClassUnloadingEnabled) {
4846 weak_root_cl = &scan_mark_weak_root_cl;
4847 weak_cld_cl = &scan_mark_weak_cld_cl;
4848 } else {
4849 weak_root_cl = &scan_mark_root_cl;
4850 weak_cld_cl = &scan_mark_cld_cl;
4851 }
4852 } else {
4853 strong_root_cl = &scan_only_root_cl;
4854 weak_root_cl = &scan_only_root_cl;
4855 strong_cld_cl = &scan_only_cld_cl;
4856 weak_cld_cl = &scan_only_cld_cl;
4857 strong_code_cl = &scan_only_code_cl;
4858 }
4859
4860
4861 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
4862
4863 pss.start_strong_roots();
4864 _g1h->g1_process_roots(strong_root_cl,
4865 weak_root_cl,
4866 &push_heap_rs_cl,
4867 strong_cld_cl,
4868 weak_cld_cl,
4869 strong_code_cl,
4870 worker_id);
4871
4902
4903 // *** Common G1 Evacuation Stuff
4904
4905 // This method is run in a GC worker.
4906
4907 void
4908 G1CollectedHeap::
4909 g1_process_roots(OopClosure* scan_non_heap_roots,
4910 OopClosure* scan_non_heap_weak_roots,
4911 OopsInHeapRegionClosure* scan_rs,
4912 CLDClosure* scan_strong_clds,
4913 CLDClosure* scan_weak_clds,
4914 CodeBlobClosure* scan_strong_code,
4915 uint worker_i) {
4916
4917 // First scan the shared roots.
4918 double ext_roots_start = os::elapsedTime();
4919 double closure_app_time_sec = 0.0;
4920
4921 bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
4922 bool trace_metadata = during_im && G1ClassUnloadingEnabled;
4923
4924 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4925 BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
4926
4927 process_roots(false, // no scoping; this is parallel code
4928 SharedHeap::SO_None,
4929 &buf_scan_non_heap_roots,
4930 &buf_scan_non_heap_weak_roots,
4931 scan_strong_clds,
4932 // Unloading Initial Marks handle the weak CLDs separately.
4933 (trace_metadata ? NULL : scan_weak_clds),
4934 scan_strong_code);
4935
4936 // Now the CM ref_processor roots.
4937 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4938 // We need to treat the discovered reference lists of the
4939 // concurrent mark ref processor as roots and keep entries
4940 // (which are added by the marking threads) on them live
4941 // until they can be processed at the end of marking.
4942 ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
4943 }
4944
4945 if (trace_metadata) {
4946 // Barrier to make sure all workers passed
4947 // the strong CLD and strong nmethods phases.
4948 active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
4949
4950 // Now take the complement of the strong CLDs.
4951 ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
4952 }
4953
4954 // Finish up any enqueued closure apps (attributed as object copy time).
4955 buf_scan_non_heap_roots.done();
4956 buf_scan_non_heap_weak_roots.done();
4957
4958 double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
4959 + buf_scan_non_heap_weak_roots.closure_app_seconds();
4960
4961 g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
4962
4963 double ext_root_time_ms =
4964 ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4965
5942 set_par_threads(n_workers);
5943 } else {
5944 assert(n_par_threads() == 0,
5945 "Should be the original non-parallel value");
5946 n_workers = 1;
5947 }
5948
5949 G1ParTask g1_par_task(this, _task_queues);
5950
5951 init_for_evac_failure(NULL);
5952
5953 rem_set()->prepare_for_younger_refs_iterate(true);
5954
5955 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5956 double start_par_time_sec = os::elapsedTime();
5957 double end_par_time_sec;
5958
5959 {
5960 StrongRootsScope srs(this);
5961 // InitialMark needs claim bits to keep track of the marked-through CLDs.
5962 // FIXME: Needed?
5963 if (g1_policy()->during_initial_mark_pause()) {
5964 ClassLoaderDataGraph::clear_claimed_marks();
5965 }
5966
5967 if (G1CollectedHeap::use_parallel_gc_threads()) {
5968 // The individual threads will set their evac-failure closures.
5969 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5970 // These tasks use SharedHeap::_process_strong_tasks
5971 assert(UseDynamicNumberOfGCThreads ||
5972 workers()->active_workers() == workers()->total_workers(),
5973 "If not dynamic should be using all the workers");
5974 workers()->run_task(&g1_par_task);
5975 } else {
5976 g1_par_task.set_for_termination(n_workers);
5977 g1_par_task.work(0);
5978 }
5979 end_par_time_sec = os::elapsedTime();
5980
5981 // Closing the inner scope will execute the destructor
5982 // for the StrongRootsScope object. We record the current
|