11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "aot/aotLoader.hpp"
28 #include "classfile/stringTable.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "code/codeCache.hpp"
31 #include "gc/g1/bufferingOopClosure.hpp"
32 #include "gc/g1/g1BarrierSet.hpp"
33 #include "gc/g1/g1CodeBlobClosure.hpp"
34 #include "gc/g1/g1CollectedHeap.inline.hpp"
35 #include "gc/g1/g1CollectorState.hpp"
36 #include "gc/g1/g1GCPhaseTimes.hpp"
37 #include "gc/g1/g1Policy.hpp"
38 #include "gc/g1/g1RootClosures.hpp"
39 #include "gc/g1/g1RootProcessor.hpp"
40 #include "gc/g1/heapRegion.inline.hpp"
41 #include "gc/shared/weakProcessor.hpp"
42 #include "memory/allocation.inline.hpp"
43 #include "runtime/mutex.hpp"
44 #include "services/management.hpp"
45 #include "utilities/macros.hpp"
46
47 void G1RootProcessor::worker_has_discovered_all_strong_classes() {
48 assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
49
50 uint new_value = (uint)Atomic::add(1, &_n_workers_discovered_strong_classes);
51 if (new_value == n_workers()) {
52 // This thread is last. Notify the others.
53 MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
54 _lock.notify_all();
55 }
56 }
57
// Block the calling worker until every worker has reported (via
// worker_has_discovered_all_strong_classes()) that it is done discovering
// strong CLDs/nmethods.
void G1RootProcessor::wait_until_all_strong_classes_discovered() {
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  // Fast path: if the counter already shows all workers arrived, avoid
  // taking the monitor at all.
  if ((uint)_n_workers_discovered_strong_classes != n_workers()) {
    MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
    // Re-check under the lock and loop on the wait: this guards both against
    // spurious wakeups and against the notify racing with the unlocked check
    // above.
    while ((uint)_n_workers_discovered_strong_classes != n_workers()) {
      _lock.wait(Mutex::_no_safepoint_check_flag, 0, false);
    }
  }
}
68
// Set up root-processing state for one collection: the claim bitmap for the
// strong sub-tasks, a strong-roots scope sized for n_workers, the barrier
// monitor used to synchronize strong/weak class discovery, and the worker
// arrival counter (starting at zero).
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
    _g1h(g1h),
    _process_strong_tasks(G1RP_PS_NumElements),
    _srs(n_workers),
    _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
    _n_workers_discovered_strong_classes(0) {}
75
76 void G1RootProcessor::evacuate_roots(G1EvacuationRootClosures* closures, uint worker_i) {
77 double ext_roots_start = os::elapsedTime();
78 G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
79
80 process_java_roots(closures, phase_times, worker_i);
81
82 // This is the point where this worker thread will not find more strong CLDs/nmethods.
83 // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
84 if (closures->trace_metadata()) {
85 worker_has_discovered_all_strong_classes();
86 }
87
88 process_vm_roots(closures, phase_times, worker_i);
89 process_string_table_roots(closures, phase_times, worker_i);
90
91 {
92 // Now the CM ref_processor roots.
93 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
94 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
95 // We need to treat the discovered reference lists of the
96 // concurrent mark ref processor as roots and keep entries
97 // (which are added by the marking threads) on them live
98 // until they can be processed at the end of marking.
99 _g1h->ref_processor_cm()->weak_oops_do(closures->strong_oops());
100 }
101 }
102
103 if (closures->trace_metadata()) {
104 {
105 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongCLD, worker_i);
106 // Barrier to make sure all workers passed
107 // the strong CLD and strong nmethods phases.
108 wait_until_all_strong_classes_discovered();
109 }
110
111 // Now take the complement of the strong CLDs.
112 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WeakCLDRoots, worker_i);
113 assert(closures->second_pass_weak_clds() != NULL, "Should be non-null if we are tracing metadata.");
114 ClassLoaderDataGraph::roots_cld_do(NULL, closures->second_pass_weak_clds());
115 } else {
116 phase_times->record_time_secs(G1GCPhaseTimes::WaitForStrongCLD, worker_i, 0.0);
117 phase_times->record_time_secs(G1GCPhaseTimes::WeakCLDRoots, worker_i, 0.0);
118 assert(closures->second_pass_weak_clds() == NULL, "Should be null if not tracing metadata.");
119 }
120
121 // Finish up any enqueued closure apps (attributed as object copy time).
122 closures->flush();
123
124 double obj_copy_time_sec = closures->closure_app_seconds();
125
126 phase_times->record_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, obj_copy_time_sec);
127
128 double ext_root_time_sec = os::elapsedTime() - ext_roots_start - obj_copy_time_sec;
129
130 phase_times->record_time_secs(G1GCPhaseTimes::ExtRootScan, worker_i, ext_root_time_sec);
131
132 // During conc marking we have to filter the per-thread SATB buffers
133 // to make sure we remove any oops into the CSet (which will show up
134 // as implicitly live).
135 {
136 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
137 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_or_rebuild_in_progress()) {
138 G1BarrierSet::satb_mark_queue_set().filter_thread_buffers();
139 }
140 }
141
142 _process_strong_tasks.all_tasks_completed(n_workers());
143 }
144
145 // Adaptor to pass the closures to the strong roots in the VM.
146 class StrongRootsClosures : public G1RootClosures {
147 OopClosure* _roots;
148 CLDClosure* _clds;
149 CodeBlobClosure* _blobs;
150 public:
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "aot/aotLoader.hpp"
28 #include "classfile/stringTable.hpp"
29 #include "classfile/systemDictionary.hpp"
30 #include "code/codeCache.hpp"
31 #include "gc/g1/g1BarrierSet.hpp"
32 #include "gc/g1/g1CodeBlobClosure.hpp"
33 #include "gc/g1/g1CollectedHeap.inline.hpp"
34 #include "gc/g1/g1CollectorState.hpp"
35 #include "gc/g1/g1GCPhaseTimes.hpp"
36 #include "gc/g1/g1ParScanThreadState.inline.hpp"
37 #include "gc/g1/g1Policy.hpp"
38 #include "gc/g1/g1RootClosures.hpp"
39 #include "gc/g1/g1RootProcessor.hpp"
40 #include "gc/g1/heapRegion.inline.hpp"
41 #include "gc/shared/weakProcessor.hpp"
42 #include "memory/allocation.inline.hpp"
43 #include "runtime/mutex.hpp"
44 #include "services/management.hpp"
45 #include "utilities/macros.hpp"
46
47 void G1RootProcessor::worker_has_discovered_all_strong_classes() {
48 assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
49
50 uint new_value = (uint)Atomic::add(1, &_n_workers_discovered_strong_classes);
51 if (new_value == n_workers()) {
52 // This thread is last. Notify the others.
53 MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
54 _lock.notify_all();
55 }
56 }
57
// Block the calling worker until every worker has reported (via
// worker_has_discovered_all_strong_classes()) that it is done discovering
// strong CLDs/nmethods.
void G1RootProcessor::wait_until_all_strong_classes_discovered() {
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  // Fast path: if the counter already shows all workers arrived, avoid
  // taking the monitor at all.
  if ((uint)_n_workers_discovered_strong_classes != n_workers()) {
    MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
    // Re-check under the lock and loop on the wait: this guards both against
    // spurious wakeups and against the notify racing with the unlocked check
    // above.
    while ((uint)_n_workers_discovered_strong_classes != n_workers()) {
      _lock.wait(Mutex::_no_safepoint_check_flag, 0, false);
    }
  }
}
68
// Set up root-processing state for one collection: the claim bitmap for the
// strong sub-tasks, a strong-roots scope sized for n_workers, the barrier
// monitor used to synchronize strong/weak class discovery, and the worker
// arrival counter (starting at zero).
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
    _g1h(g1h),
    _process_strong_tasks(G1RP_PS_NumElements),
    _srs(n_workers),
    _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
    _n_workers_discovered_strong_classes(0) {}
75
76 void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_i) {
77 G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
78
79 G1EvacPhaseTimesTracker timer(phase_times, pss, G1GCPhaseTimes::ExtRootScan, worker_i);
80
81 G1EvacuationRootClosures* closures = pss->closures();
82 process_java_roots(closures, phase_times, worker_i);
83
84 // This is the point where this worker thread will not find more strong CLDs/nmethods.
85 // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
86 if (closures->trace_metadata()) {
87 worker_has_discovered_all_strong_classes();
88 }
89
90 process_vm_roots(closures, phase_times, worker_i);
91 process_string_table_roots(closures, phase_times, worker_i);
92
93 {
94 // Now the CM ref_processor roots.
95 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
96 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
97 // We need to treat the discovered reference lists of the
98 // concurrent mark ref processor as roots and keep entries
99 // (which are added by the marking threads) on them live
100 // until they can be processed at the end of marking.
101 _g1h->ref_processor_cm()->weak_oops_do(closures->strong_oops());
102 }
103 }
104
105 if (closures->trace_metadata()) {
106 {
107 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongCLD, worker_i);
108 // Barrier to make sure all workers passed
109 // the strong CLD and strong nmethods phases.
110 wait_until_all_strong_classes_discovered();
111 }
112
113 // Now take the complement of the strong CLDs.
114 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WeakCLDRoots, worker_i);
115 assert(closures->second_pass_weak_clds() != NULL, "Should be non-null if we are tracing metadata.");
116 ClassLoaderDataGraph::roots_cld_do(NULL, closures->second_pass_weak_clds());
117 } else {
118 phase_times->record_time_secs(G1GCPhaseTimes::WaitForStrongCLD, worker_i, 0.0);
119 phase_times->record_time_secs(G1GCPhaseTimes::WeakCLDRoots, worker_i, 0.0);
120 assert(closures->second_pass_weak_clds() == NULL, "Should be null if not tracing metadata.");
121 }
122
123 // During conc marking we have to filter the per-thread SATB buffers
124 // to make sure we remove any oops into the CSet (which will show up
125 // as implicitly live).
126 {
127 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
128 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_or_rebuild_in_progress()) {
129 G1BarrierSet::satb_mark_queue_set().filter_thread_buffers();
130 }
131 }
132
133 _process_strong_tasks.all_tasks_completed(n_workers());
134 }
135
136 // Adaptor to pass the closures to the strong roots in the VM.
137 class StrongRootsClosures : public G1RootClosures {
138 OopClosure* _roots;
139 CLDClosure* _clds;
140 CodeBlobClosure* _blobs;
141 public:
|