16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "classfile/stringTable.hpp"
28 #include "classfile/systemDictionary.hpp"
29 #include "code/codeCache.hpp"
30 #include "gc/g1/bufferingOopClosure.hpp"
31 #include "gc/g1/g1CodeBlobClosure.hpp"
32 #include "gc/g1/g1CollectedHeap.inline.hpp"
33 #include "gc/g1/g1CollectorPolicy.hpp"
34 #include "gc/g1/g1CollectorState.hpp"
35 #include "gc/g1/g1GCPhaseTimes.hpp"
36 #include "gc/g1/g1RootClosureSet.hpp"
37 #include "gc/g1/g1RootProcessor.hpp"
38 #include "gc/g1/heapRegion.inline.hpp"
39 #include "memory/allocation.inline.hpp"
40 #include "runtime/fprofiler.hpp"
41 #include "runtime/mutex.hpp"
42 #include "services/management.hpp"
43
44 void G1RootProcessor::worker_has_discovered_all_strong_classes() {
45 assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
46
47 uint new_value = (uint)Atomic::add(1, &_n_workers_discovered_strong_classes);
48 if (new_value == n_workers()) {
49 // This thread is last. Notify the others.
50 MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
51 _lock.notify_all();
52 }
53 }
54
// Block the calling worker until every worker has reported (via
// worker_has_discovered_all_strong_classes()) that it is done finding
// strong CLDs/nmethods. The unlocked pre-check is a fast path; the
// counter is re-checked under the monitor before waiting.
void G1RootProcessor::wait_until_all_strong_classes_discovered() {
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  if ((uint)_n_workers_discovered_strong_classes != n_workers()) {
    MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
    // Loop to tolerate spurious wakeups; the last worker notifies
    // while holding the same monitor.
    while ((uint)_n_workers_discovered_strong_classes != n_workers()) {
      _lock.wait(Mutex::_no_safepoint_check_flag, 0, false);
    }
  }
}
65
// Set up root processing for n_workers parallel GC workers operating on
// g1h. The monitor/counter pair implements the strong-class discovery
// barrier used when class unloading with concurrent mark is enabled.
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
    _g1h(g1h),
    _process_strong_tasks(G1RP_PS_NumElements),
    _srs(n_workers),
    _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
    _n_workers_discovered_strong_classes(0) {}
72
73 void G1RootProcessor::evacuate_roots(G1EvacuationRootClosureSet* closures, bool trace_metadata, uint worker_i) {
74 double ext_roots_start = os::elapsedTime();
75 G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
76
77 process_java_roots(closures, phase_times, worker_i);
78
79 // This is the point where this worker thread will not find more strong CLDs/nmethods.
80 // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
81 if (trace_metadata) {
82 worker_has_discovered_all_strong_classes();
83 }
84
85 process_vm_roots(closures, phase_times, worker_i);
86
87 {
88 // Now the CM ref_processor roots.
89 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
90 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
91 // We need to treat the discovered reference lists of the
92 // concurrent mark ref processor as roots and keep entries
93 // (which are added by the marking threads) on them live
94 // until they can be processed at the end of marking.
95 _g1h->ref_processor_cm()->weak_oops_do(closures->weak_oops());
96 }
97 }
98
99 if (trace_metadata) {
100 {
101 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongCLD, worker_i);
102 // Barrier to make sure all workers passed
103 // the strong CLD and strong nmethods phases.
104 wait_until_all_strong_classes_discovered();
105 }
106
107 // Now take the complement of the strong CLDs.
108 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WeakCLDRoots, worker_i);
109 assert(closures->second_pass_weak_clds() != NULL, "should be null");
110 ClassLoaderDataGraph::roots_cld_do(NULL, closures->second_pass_weak_clds());
111 } else {
112 phase_times->record_time_secs(G1GCPhaseTimes::WaitForStrongCLD, worker_i, 0.0);
113 phase_times->record_time_secs(G1GCPhaseTimes::WeakCLDRoots, worker_i, 0.0);
114 assert(closures->second_pass_weak_clds() == NULL, "should not null");
115 }
116
117 // Finish up any enqueued closure apps (attributed as object copy time).
118 closures->flush();
119
120 double obj_copy_time_sec = closures->closure_app_seconds();
121
122 phase_times->record_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, obj_copy_time_sec);
123
124 double ext_root_time_sec = os::elapsedTime() - ext_roots_start - obj_copy_time_sec;
125
126 phase_times->record_time_secs(G1GCPhaseTimes::ExtRootScan, worker_i, ext_root_time_sec);
127
128 // During conc marking we have to filter the per-thread SATB buffers
129 // to make sure we remove any oops into the CSet (which will show up
130 // as implicitly live).
131 {
132 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
133 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_in_progress()) {
134 JavaThread::satb_mark_queue_set().filter_thread_buffers();
135 }
136 }
137
138 _process_strong_tasks.all_tasks_completed(n_workers());
139 }
140
141 // Adaptor to pass the closures to the strong roots in the VM.
142 class StrongRootsClosures : public G1RootClosureSet {
143 OopClosure* _roots;
144 CLDClosure* _clds;
145 CodeBlobClosure* _blobs;
146 public:
147 StrongRootsClosures(OopClosure* roots, CLDClosure* clds, CodeBlobClosure* blobs) :
148 _roots(roots), _clds(clds), _blobs(blobs) {}
149
150 OopClosure* weak_oops() { return NULL; }
151 OopClosure* strong_oops() { return _roots; }
152
153 CLDClosure* weak_clds() { return NULL; }
154 CLDClosure* strong_clds() { return _clds; }
155 CLDClosure* thread_root_clds() { return _clds; }
156 CLDClosure* second_pass_weak_clds() { ShouldNotReachHere(); return NULL; }
157
158 CodeBlobClosure* strong_codeblobs() { return _blobs; }
159 CodeBlobClosure* weak_codeblobs() { return NULL; }
160 CodeBlobClosure* thread_root_codeblobs() { return _blobs; }
161 };
162
163 void G1RootProcessor::process_strong_roots(OopClosure* oops,
164 CLDClosure* clds,
165 CodeBlobClosure* blobs) {
166 StrongRootsClosures closures(oops, clds, blobs);
167
168 process_java_roots(&closures, NULL, 0);
169 process_vm_roots(&closures, NULL, 0);
170
171 _process_strong_tasks.all_tasks_completed(n_workers());
172 }
173
174 // Adaptor to pass the closures to all the roots in the VM.
175 class AllRootsClosures : public G1RootClosureSet {
176 OopClosure* _roots;
177 CLDClosure* _clds;
178 CodeBlobClosure* _blobs;
179 public:
180 AllRootsClosures(OopClosure* roots, CLDClosure* clds, CodeBlobClosure* blobs) :
181 _roots(roots), _clds(clds), _blobs(blobs) {}
182
183 OopClosure* weak_oops() { return _roots; }
184 OopClosure* strong_oops() { return _roots; }
185
186 CLDClosure* weak_clds() { return _clds; }
187 CLDClosure* strong_clds() { return _clds; }
188 CLDClosure* thread_root_clds() { return NULL; }
189 CLDClosure* second_pass_weak_clds() { ShouldNotReachHere(); return NULL; }
190
191 CodeBlobClosure* strong_codeblobs() { return NULL; }
192 CodeBlobClosure* weak_codeblobs() { return _blobs; }
193 CodeBlobClosure* thread_root_codeblobs() { return NULL; }
194 };
195
196 void G1RootProcessor::process_all_roots(OopClosure* oops,
197 CLDClosure* clds,
198 CodeBlobClosure* blobs) {
199 AllRootsClosures closures(oops, clds, blobs);
200
201 process_java_roots(&closures, NULL, 0);
202 process_vm_roots(&closures, NULL, 0);
203
204 _process_strong_tasks.all_tasks_completed(n_workers());
205 }
206
207 void G1RootProcessor::process_java_roots(G1RootClosureSet* closures,
208 G1GCPhaseTimes* phase_times,
209 uint worker_i) {
210 assert(closures->thread_root_clds() == NULL || closures->weak_clds() == NULL, "There is overlap between those, only one may be set");
211 // Iterating over the CLDG and the Threads are done early to allow us to
212 // first process the strong CLDs and nmethods and then, after a barrier,
213 // let the thread process the weak CLDs and nmethods.
214 {
215 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
216 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
217 ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
218 }
219 }
220
221 {
222 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
223 bool is_par = n_workers() > 1;
224 Threads::possibly_parallel_oops_do(is_par,
225 closures->strong_oops(),
226 closures->thread_root_clds(),
227 closures->thread_root_codeblobs());
228 }
229 }
230
231 void G1RootProcessor::process_vm_roots(G1RootClosureSet* closures,
232 G1GCPhaseTimes* phase_times,
233 uint worker_i) {
234 OopClosure* strong_roots = closures->strong_oops();
235 OopClosure* weak_roots = closures->weak_oops();
236
237 {
238 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
239 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Universe_oops_do)) {
240 Universe::oops_do(strong_roots);
241 }
242 }
243
244 {
245 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JNIRoots, worker_i);
246 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_JNIHandles_oops_do)) {
247 JNIHandles::oops_do(strong_roots);
248 }
249 }
250
251 {
|
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "classfile/stringTable.hpp"
28 #include "classfile/systemDictionary.hpp"
29 #include "code/codeCache.hpp"
30 #include "gc/g1/bufferingOopClosure.hpp"
31 #include "gc/g1/g1CodeBlobClosure.hpp"
32 #include "gc/g1/g1CollectedHeap.inline.hpp"
33 #include "gc/g1/g1CollectorPolicy.hpp"
34 #include "gc/g1/g1CollectorState.hpp"
35 #include "gc/g1/g1GCPhaseTimes.hpp"
36 #include "gc/g1/g1RootClosures.hpp"
37 #include "gc/g1/g1RootProcessor.hpp"
38 #include "gc/g1/heapRegion.inline.hpp"
39 #include "memory/allocation.inline.hpp"
40 #include "runtime/fprofiler.hpp"
41 #include "runtime/mutex.hpp"
42 #include "services/management.hpp"
43
44 void G1RootProcessor::worker_has_discovered_all_strong_classes() {
45 assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");
46
47 uint new_value = (uint)Atomic::add(1, &_n_workers_discovered_strong_classes);
48 if (new_value == n_workers()) {
49 // This thread is last. Notify the others.
50 MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
51 _lock.notify_all();
52 }
53 }
54
// Block the calling worker until every worker has reported (via
// worker_has_discovered_all_strong_classes()) that it is done finding
// strong CLDs/nmethods. The unlocked pre-check is a fast path; the
// counter is re-checked under the monitor before waiting.
void G1RootProcessor::wait_until_all_strong_classes_discovered() {
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  if ((uint)_n_workers_discovered_strong_classes != n_workers()) {
    MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
    // Loop to tolerate spurious wakeups; the last worker notifies
    // while holding the same monitor.
    while ((uint)_n_workers_discovered_strong_classes != n_workers()) {
      _lock.wait(Mutex::_no_safepoint_check_flag, 0, false);
    }
  }
}
65
// Set up root processing for n_workers parallel GC workers operating on
// g1h. The monitor/counter pair implements the strong-class discovery
// barrier used when class unloading with concurrent mark is enabled.
G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
    _g1h(g1h),
    _process_strong_tasks(G1RP_PS_NumElements),
    _srs(n_workers),
    _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
    _n_workers_discovered_strong_classes(0) {}
72
73 void G1RootProcessor::evacuate_roots(G1EvacuationRootClosures* closures, bool trace_metadata, uint worker_i) {
74 double ext_roots_start = os::elapsedTime();
75 G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
76
77 process_java_roots(closures, phase_times, worker_i);
78
79 // This is the point where this worker thread will not find more strong CLDs/nmethods.
80 // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
81 if (trace_metadata) {
82 worker_has_discovered_all_strong_classes();
83 }
84
85 process_vm_roots(closures, phase_times, worker_i);
86
87 {
88 // Now the CM ref_processor roots.
89 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
90 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
91 // We need to treat the discovered reference lists of the
92 // concurrent mark ref processor as roots and keep entries
93 // (which are added by the marking threads) on them live
94 // until they can be processed at the end of marking.
95 _g1h->ref_processor_cm()->weak_oops_do(closures->strong_oops());
96 }
97 }
98
99 if (trace_metadata) {
100 {
101 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongCLD, worker_i);
102 // Barrier to make sure all workers passed
103 // the strong CLD and strong nmethods phases.
104 wait_until_all_strong_classes_discovered();
105 }
106
107 // Now take the complement of the strong CLDs.
108 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WeakCLDRoots, worker_i);
109 assert(closures->second_pass_weak_clds() != NULL, "Should be non-null if we are tracing metadata.");
110 ClassLoaderDataGraph::roots_cld_do(NULL, closures->second_pass_weak_clds());
111 } else {
112 phase_times->record_time_secs(G1GCPhaseTimes::WaitForStrongCLD, worker_i, 0.0);
113 phase_times->record_time_secs(G1GCPhaseTimes::WeakCLDRoots, worker_i, 0.0);
114 assert(closures->second_pass_weak_clds() == NULL, "Should be null if not tracing metadata.");
115 }
116
117 // Finish up any enqueued closure apps (attributed as object copy time).
118 closures->flush();
119
120 double obj_copy_time_sec = closures->closure_app_seconds();
121
122 phase_times->record_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, obj_copy_time_sec);
123
124 double ext_root_time_sec = os::elapsedTime() - ext_roots_start - obj_copy_time_sec;
125
126 phase_times->record_time_secs(G1GCPhaseTimes::ExtRootScan, worker_i, ext_root_time_sec);
127
128 // During conc marking we have to filter the per-thread SATB buffers
129 // to make sure we remove any oops into the CSet (which will show up
130 // as implicitly live).
131 {
132 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
133 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_in_progress()) {
134 JavaThread::satb_mark_queue_set().filter_thread_buffers();
135 }
136 }
137
138 _process_strong_tasks.all_tasks_completed(n_workers());
139 }
140
141 // Adaptor to pass the closures to the strong roots in the VM.
142 class StrongRootsClosures : public G1RootClosures {
143 OopClosure* _roots;
144 CLDClosure* _clds;
145 CodeBlobClosure* _blobs;
146 public:
147 StrongRootsClosures(OopClosure* roots, CLDClosure* clds, CodeBlobClosure* blobs) :
148 _roots(roots), _clds(clds), _blobs(blobs) {}
149
150 OopClosure* weak_oops() { return NULL; }
151 OopClosure* strong_oops() { return _roots; }
152
153 CLDClosure* weak_clds() { return NULL; }
154 CLDClosure* strong_clds() { return _clds; }
155 CLDClosure* thread_root_clds() { return _clds; }
156 CLDClosure* second_pass_weak_clds() { ShouldNotReachHere(); return NULL; }
157
158 CodeBlobClosure* strong_codeblobs() { return _blobs; }
159 CodeBlobClosure* weak_codeblobs() { return NULL; }
160 CodeBlobClosure* thread_root_codeblobs() { return _blobs; }
161 };
162
163 void G1RootProcessor::process_strong_roots(OopClosure* oops,
164 CLDClosure* clds,
165 CodeBlobClosure* blobs) {
166 StrongRootsClosures closures(oops, clds, blobs);
167
168 process_java_roots(&closures, NULL, 0);
169 process_vm_roots(&closures, NULL, 0);
170
171 _process_strong_tasks.all_tasks_completed(n_workers());
172 }
173
174 // Adaptor to pass the closures to all the roots in the VM.
175 class AllRootsClosures : public G1RootClosures {
176 OopClosure* _roots;
177 CLDClosure* _clds;
178 CodeBlobClosure* _blobs;
179 public:
180 AllRootsClosures(OopClosure* roots, CLDClosure* clds, CodeBlobClosure* blobs) :
181 _roots(roots), _clds(clds), _blobs(blobs) {}
182
183 OopClosure* weak_oops() { return _roots; }
184 OopClosure* strong_oops() { return _roots; }
185
186 CLDClosure* weak_clds() { return _clds; }
187 CLDClosure* strong_clds() { return _clds; }
188 CLDClosure* thread_root_clds() { return NULL; }
189 CLDClosure* second_pass_weak_clds() { ShouldNotReachHere(); return NULL; }
190
191 CodeBlobClosure* strong_codeblobs() { return NULL; }
192 CodeBlobClosure* weak_codeblobs() { return _blobs; }
193 CodeBlobClosure* thread_root_codeblobs() { return NULL; }
194 };
195
196 void G1RootProcessor::process_all_roots(OopClosure* oops,
197 CLDClosure* clds,
198 CodeBlobClosure* blobs) {
199 AllRootsClosures closures(oops, clds, blobs);
200
201 process_java_roots(&closures, NULL, 0);
202 process_vm_roots(&closures, NULL, 0);
203
204 _process_strong_tasks.all_tasks_completed(n_workers());
205 }
206
207 void G1RootProcessor::process_java_roots(G1RootClosures* closures,
208 G1GCPhaseTimes* phase_times,
209 uint worker_i) {
210 assert(closures->thread_root_clds() == NULL || closures->weak_clds() == NULL, "There is overlap between those, only one may be set");
211 // Iterating over the CLDG and the Threads are done early to allow us to
212 // first process the strong CLDs and nmethods and then, after a barrier,
213 // let the thread process the weak CLDs and nmethods.
214 {
215 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
216 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
217 ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
218 }
219 }
220
221 {
222 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
223 bool is_par = n_workers() > 1;
224 Threads::possibly_parallel_oops_do(is_par,
225 closures->strong_oops(),
226 closures->thread_root_clds(),
227 closures->thread_root_codeblobs());
228 }
229 }
230
231 void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
232 G1GCPhaseTimes* phase_times,
233 uint worker_i) {
234 OopClosure* strong_roots = closures->strong_oops();
235 OopClosure* weak_roots = closures->weak_oops();
236
237 {
238 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
239 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Universe_oops_do)) {
240 Universe::oops_do(strong_roots);
241 }
242 }
243
244 {
245 G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JNIRoots, worker_i);
246 if (!_process_strong_tasks.is_task_claimed(G1RP_PS_JNIHandles_oops_do)) {
247 JNIHandles::oops_do(strong_roots);
248 }
249 }
250
251 {
|