5 * under the terms of the GNU General Public License version 2 only, as
6 * published by the Free Software Foundation.
7 *
8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "classfile/stringTable.hpp"
26 #include "code/codeCache.hpp"
27 #include "gc/parallel/gcTaskManager.hpp"
28 #include "gc/parallel/parallelScavengeHeap.hpp"
29 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
30 #include "gc/parallel/psClosure.inline.hpp"
31 #include "gc/parallel/psMarkSweepProxy.hpp"
32 #include "gc/parallel/psParallelCompact.inline.hpp"
33 #include "gc/parallel/psPromotionManager.inline.hpp"
34 #include "gc/parallel/psScavenge.inline.hpp"
35 #include "gc/parallel/psTasks.hpp"
36 #include "gc/shared/gcCause.hpp"
37 #include "gc/shared/gcHeapSummary.hpp"
38 #include "gc/shared/gcId.hpp"
39 #include "gc/shared/gcLocker.hpp"
40 #include "gc/shared/gcTimer.hpp"
41 #include "gc/shared/gcTrace.hpp"
42 #include "gc/shared/gcTraceTime.inline.hpp"
43 #include "gc/shared/isGCActiveMark.hpp"
44 #include "gc/shared/referencePolicy.hpp"
45 #include "gc/shared/referenceProcessor.hpp"
46 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
47 #include "gc/shared/spaceDecorator.hpp"
48 #include "gc/shared/weakProcessor.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "memory/universe.hpp"
51 #include "logging/log.hpp"
52 #include "oops/access.inline.hpp"
53 #include "oops/compressedOops.inline.hpp"
54 #include "oops/oop.inline.hpp"
55 #include "runtime/biasedLocking.hpp"
56 #include "runtime/handles.inline.hpp"
57 #include "runtime/threadCritical.hpp"
58 #include "runtime/vmThread.hpp"
59 #include "runtime/vmOperations.hpp"
60 #include "services/memoryService.hpp"
61 #include "utilities/stack.inline.hpp"
62
// Static state for the parallel young-generation (scavenge) collector.
HeapWord* PSScavenge::_to_space_top_before_gc = NULL; // to-space top saved before the GC starts
int PSScavenge::_consecutive_skipped_scavenges = 0;   // count of consecutively skipped scavenges
SpanSubjectToDiscoveryClosure PSScavenge::_span_based_discoverer;
ReferenceProcessor* PSScavenge::_ref_processor = NULL; // reference processor for scavenging
PSCardTable* PSScavenge::_card_table = NULL;
bool PSScavenge::_survivor_overflow = false; // set when survivor space overflowed
uint PSScavenge::_tenuring_threshold = 0;    // current tenuring threshold (object age)
HeapWord* PSScavenge::_young_generation_boundary = NULL;
uintptr_t PSScavenge::_young_generation_boundary_compressed = 0; // same boundary, compressed-oop form
elapsedTimer PSScavenge::_accumulated_time; // accumulated scavenge time
STWGCTimer PSScavenge::_gc_timer;
ParallelScavengeTracer PSScavenge::_gc_tracer;
CollectorCounters* PSScavenge::_counters = NULL;
76
77 // Define before use
78 class PSIsAliveClosure: public BoolObjectClosure {
79 public:
80 bool do_object_b(oop p) {
81 return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
82 }
83 };
84
85 PSIsAliveClosure PSScavenge::_is_alive_closure;
86
87 class PSKeepAliveClosure: public OopClosure {
88 protected:
89 MutableSpace* _to_space;
90 PSPromotionManager* _promotion_manager;
91
92 public:
93 PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
94 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
95 _to_space = heap->young_gen()->to_space();
|
5 * under the terms of the GNU General Public License version 2 only, as
6 * published by the Free Software Foundation.
7 *
8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "aot/aotLoader.hpp"
26 #include "classfile/classLoaderDataGraph.hpp"
27 #include "classfile/stringTable.hpp"
28 #include "code/codeCache.hpp"
29 #include "gc/parallel/parallelScavengeHeap.hpp"
30 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
31 #include "gc/parallel/psClosure.inline.hpp"
32 #include "gc/parallel/psCompactionManager.hpp"
33 #include "gc/parallel/psMarkSweepProxy.hpp"
34 #include "gc/parallel/psParallelCompact.inline.hpp"
35 #include "gc/parallel/psPromotionManager.inline.hpp"
36 #include "gc/parallel/psRootType.inline.hpp"
37 #include "gc/parallel/psScavenge.inline.hpp"
38 #include "gc/shared/gcCause.hpp"
39 #include "gc/shared/gcHeapSummary.hpp"
40 #include "gc/shared/gcId.hpp"
41 #include "gc/shared/gcLocker.hpp"
42 #include "gc/shared/gcTimer.hpp"
43 #include "gc/shared/gcTrace.hpp"
44 #include "gc/shared/gcTraceTime.inline.hpp"
45 #include "gc/shared/isGCActiveMark.hpp"
46 #include "gc/shared/referencePolicy.hpp"
47 #include "gc/shared/referenceProcessor.hpp"
48 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
49 #include "gc/shared/scavengableNMethods.hpp"
50 #include "gc/shared/spaceDecorator.hpp"
51 #include "gc/shared/weakProcessor.hpp"
52 #include "gc/shared/workerPolicy.hpp"
53 #include "gc/shared/workgroup.hpp"
54 #if INCLUDE_JVMCI
55 #include "jvmci/jvmci.hpp"
56 #endif
57 #include "memory/resourceArea.hpp"
58 #include "memory/universe.hpp"
59 #include "logging/log.hpp"
60 #include "oops/access.inline.hpp"
61 #include "oops/compressedOops.inline.hpp"
62 #include "oops/oop.inline.hpp"
63 #include "runtime/biasedLocking.hpp"
64 #include "runtime/handles.inline.hpp"
65 #include "runtime/threadCritical.hpp"
66 #include "runtime/vmThread.hpp"
67 #include "runtime/vmOperations.hpp"
68 #include "services/management.hpp"
69 #include "services/memoryService.hpp"
70 #include "utilities/stack.inline.hpp"
71
72
// Static state for the parallel young-generation (scavenge) collector.
HeapWord* PSScavenge::_to_space_top_before_gc = NULL; // to-space top saved before the GC starts
int PSScavenge::_consecutive_skipped_scavenges = 0;   // count of consecutively skipped scavenges
SpanSubjectToDiscoveryClosure PSScavenge::_span_based_discoverer;
ReferenceProcessor* PSScavenge::_ref_processor = NULL; // reference processor for scavenging
PSCardTable* PSScavenge::_card_table = NULL;
bool PSScavenge::_survivor_overflow = false; // set when survivor space overflowed
uint PSScavenge::_tenuring_threshold = 0;    // current tenuring threshold (object age)
HeapWord* PSScavenge::_young_generation_boundary = NULL;
uintptr_t PSScavenge::_young_generation_boundary_compressed = 0; // same boundary, compressed-oop form
elapsedTimer PSScavenge::_accumulated_time; // accumulated scavenge time
STWGCTimer PSScavenge::_gc_timer;
ParallelScavengeTracer PSScavenge::_gc_tracer;
CollectorCounters* PSScavenge::_counters = NULL;
86
// Scavenges one strong-root category on behalf of GC worker "which":
// pushes reachable young-gen objects onto that worker's promotion-manager
// stacks and then drains them.  Must only run during an active GC.
void scavenge_roots_task(Parallel::RootType::Value root_type, uint which) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
  PSScavengeRootsClosure roots_closure(pm);
  PSPromoteRootsClosure roots_to_old_closure(pm);

  switch (root_type) {
    case Parallel::RootType::universe:
      Universe::oops_do(&roots_closure);
      break;

    case Parallel::RootType::jni_handles:
      JNIHandles::oops_do(&roots_closure);
      break;

    case Parallel::RootType::object_synchronizer:
      ObjectSynchronizer::oops_do(&roots_closure);
      break;

    case Parallel::RootType::system_dictionary:
      SystemDictionary::oops_do(&roots_closure);
      break;

    case Parallel::RootType::class_loader_data:
      {
        PSScavengeCLDClosure cld_closure(pm);
        ClassLoaderDataGraph::cld_do(&cld_closure);
      }
      break;

    case Parallel::RootType::management:
      Management::oops_do(&roots_closure);
      break;

    case Parallel::RootType::jvmti:
      JvmtiExport::oops_do(&roots_closure);
      break;

    case Parallel::RootType::code_cache:
      {
        // Code-cache roots are promoted directly to the old gen
        // (roots_to_old_closure), not scavenged into to-space.
        MarkingCodeBlobClosure code_closure(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations);
        ScavengableNMethods::nmethods_do(&code_closure);
        AOTLoader::oops_do(&roots_closure);
      }
      break;

#if INCLUDE_JVMCI
    case Parallel::RootType::jvmci:
      JVMCI::oops_do(&roots_closure);
      break;
#endif

    case Parallel::RootType::sentinel:
    DEBUG_ONLY(default:) // DEBUG_ONLY hack will create compile error on release builds (-Wswitch) and runtime check on debug builds
      fatal("Bad enumeration value: %u", root_type);
      break;
  }

  // Do the real work
  pm->drain_stacks(false);
}
149
150 void steal_task(ParallelTaskTerminator& terminator, uint worker_id) {
151 assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
152
153 PSPromotionManager* pm =
154 PSPromotionManager::gc_thread_promotion_manager(worker_id);
155 pm->drain_stacks(true);
156 guarantee(pm->stacks_empty(),
157 "stacks should be empty at this point");
158
159 while (true) {
160 StarTask p;
161 if (PSPromotionManager::steal_depth(worker_id, p)) {
162 TASKQUEUE_STATS_ONLY(pm->record_steal(p));
163 pm->process_popped_location_depth(p);
164 pm->drain_stacks_depth(true);
165 } else {
166 if (terminator.offer_termination()) {
167 break;
168 }
169 }
170 }
171 guarantee(pm->stacks_empty(), "stacks should be empty at this point");
172 }
173
174 // Define before use
175 class PSIsAliveClosure: public BoolObjectClosure {
176 public:
177 bool do_object_b(oop p) {
178 return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
179 }
180 };
181
182 PSIsAliveClosure PSScavenge::_is_alive_closure;
183
184 class PSKeepAliveClosure: public OopClosure {
185 protected:
186 MutableSpace* _to_space;
187 PSPromotionManager* _promotion_manager;
188
189 public:
190 PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
191 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
192 _to_space = heap->young_gen()->to_space();
|
107 }
108 }
109 virtual void do_oop(oop* p) { PSKeepAliveClosure::do_oop_work(p); }
110 virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
111 };
112
113 class PSEvacuateFollowersClosure: public VoidClosure {
114 private:
115 PSPromotionManager* _promotion_manager;
116 public:
117 PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}
118
119 virtual void do_void() {
120 assert(_promotion_manager != NULL, "Sanity");
121 _promotion_manager->drain_stacks(true);
122 guarantee(_promotion_manager->stacks_empty(),
123 "stacks should be empty at this point");
124 }
125 };
126
// GCTask adapter that lets the reference processor's ProcessTask run on a
// GC worker thread; "_work_id" selects which reference queue this proxy
// works on.
class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;  // the reference-processing work being delegated
  uint _work_id;           // queue/worker index this proxy serves
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};
141
142 void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
143 {
144 PSPromotionManager* promotion_manager =
145 PSPromotionManager::gc_thread_promotion_manager(which);
146 assert(promotion_manager != NULL, "sanity check");
147 PSKeepAliveClosure keep_alive(promotion_manager);
148 PSEvacuateFollowersClosure evac_followers(promotion_manager);
149 PSIsAliveClosure is_alive;
150 _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
151 }
152
// Runs reference-processing ProcessTasks in parallel via the GCTaskManager.
class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task, uint ergo_workers);
};
156
157 void PSRefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers)
158 {
159 GCTaskQueue* q = GCTaskQueue::create();
160 GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
161 uint active_workers = manager->active_workers();
162
163 assert(active_workers == ergo_workers,
164 "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
165 ergo_workers, active_workers);
166
167 for(uint i=0; i < active_workers; i++) {
168 q->enqueue(new PSRefProcTaskProxy(task, i));
169 }
170 TaskTerminator terminator(active_workers,
171 (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
172 if (task.marks_oops_alive() && active_workers > 1) {
173 for (uint j = 0; j < active_workers; j++) {
174 q->enqueue(new StealTask(terminator.terminator()));
175 }
176 }
177 manager->execute_and_wait(q);
178 }
179
180 // This method contains all heap specific policy for invoking scavenge.
181 // PSScavenge::invoke_no_policy() will do nothing but attempt to
182 // scavenge. It will not clean up after failed promotions, bail out if
183 // we've exceeded policy time limits, or any other special behavior.
184 // All such policy should be placed here.
185 //
186 // Note that this method should only be called from the vm_thread while
187 // at a safepoint!
188 bool PSScavenge::invoke() {
189 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
190 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
191 assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
192
193 ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
194 PSAdaptiveSizePolicy* policy = heap->size_policy();
195 IsGCActiveMark mark;
196
|
204 }
205 }
206 virtual void do_oop(oop* p) { PSKeepAliveClosure::do_oop_work(p); }
207 virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
208 };
209
210 class PSEvacuateFollowersClosure: public VoidClosure {
211 private:
212 PSPromotionManager* _promotion_manager;
213 public:
214 PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}
215
216 virtual void do_void() {
217 assert(_promotion_manager != NULL, "Sanity");
218 _promotion_manager->drain_stacks(true);
219 guarantee(_promotion_manager->stacks_empty(),
220 "stacks should be empty at this point");
221 }
222 };
223
// Executes reference-processing ProcessTasks on the heap's worker gang.
class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& process_task, uint ergo_workers);
};
227
228 class PSRefProcTask : public AbstractGangTask {
229 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
230 TaskTerminator _terminator;
231 ProcessTask& _task;
232 uint _active_workers;
233
234 public:
235 PSRefProcTask(ProcessTask& task, uint active_workers)
236 : AbstractGangTask("PSRefProcTask"),
237 _terminator(active_workers, PSPromotionManager::stack_array_depth()),
238 _task(task),
239 _active_workers(active_workers) {
240 }
241
242 virtual void work(uint worker_id) {
243 PSPromotionManager* promotion_manager =
244 PSPromotionManager::gc_thread_promotion_manager(worker_id);
245 assert(promotion_manager != NULL, "sanity check");
246 PSKeepAliveClosure keep_alive(promotion_manager);
247 PSEvacuateFollowersClosure evac_followers(promotion_manager);
248 PSIsAliveClosure is_alive;
249 _task.work(worker_id, is_alive, keep_alive, evac_followers);
250
251 if (_task.marks_oops_alive() && _active_workers > 1) {
252 steal_task(*_terminator.terminator(), worker_id);
253 }
254 }
255 };
256
257 void PSRefProcTaskExecutor::execute(ProcessTask& process_task, uint ergo_workers) {
258 PSRefProcTask task(process_task, ergo_workers);
259 ParallelScavengeHeap::heap()->workers().run_task(&task);
260 }
261
262 // This method contains all heap specific policy for invoking scavenge.
263 // PSScavenge::invoke_no_policy() will do nothing but attempt to
264 // scavenge. It will not clean up after failed promotions, bail out if
265 // we've exceeded policy time limits, or any other special behavior.
266 // All such policy should be placed here.
267 //
268 // Note that this method should only be called from the vm_thread while
269 // at a safepoint!
270 bool PSScavenge::invoke() {
271 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
272 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
273 assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
274
275 ParallelScavengeHeap* const heap = ParallelScavengeHeap::heap();
276 PSAdaptiveSizePolicy* policy = heap->size_policy();
277 IsGCActiveMark mark;
278
|
203 PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
204 const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
205 counters->update_full_follows_scavenge(ffs_val);
206 }
207
208 if (need_full_gc) {
209 GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
210 SoftRefPolicy* srp = heap->soft_ref_policy();
211 const bool clear_all_softrefs = srp->should_clear_all_soft_refs();
212
213 if (UseParallelOldGC) {
214 full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
215 } else {
216 full_gc_done = PSMarkSweepProxy::invoke_no_policy(clear_all_softrefs);
217 }
218 }
219
220 return full_gc_done;
221 }
222
223 class PSAddThreadRootsTaskClosure : public ThreadClosure {
224 private:
225 GCTaskQueue* _q;
226
227 public:
228 PSAddThreadRootsTaskClosure(GCTaskQueue* q) : _q(q) { }
229 void do_thread(Thread* t) {
230 _q->enqueue(new ThreadRootsTask(t));
231 }
232 };
233
234 // This method contains no policy. You should probably
235 // be calling invoke() instead.
236 bool PSScavenge::invoke_no_policy() {
237 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
238 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
239
240 _gc_timer.register_gc_start();
241
242 TimeStamp scavenge_entry;
243 TimeStamp scavenge_midpoint;
244 TimeStamp scavenge_exit;
245
246 scavenge_entry.update();
247
248 if (GCLocker::check_active_before_gc()) {
249 return false;
|
285 PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
286 const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
287 counters->update_full_follows_scavenge(ffs_val);
288 }
289
290 if (need_full_gc) {
291 GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
292 SoftRefPolicy* srp = heap->soft_ref_policy();
293 const bool clear_all_softrefs = srp->should_clear_all_soft_refs();
294
295 if (UseParallelOldGC) {
296 full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
297 } else {
298 full_gc_done = PSMarkSweepProxy::invoke_no_policy(clear_all_softrefs);
299 }
300 }
301
302 return full_gc_done;
303 }
304
305 class PSThreadRootsTaskClosure : public ThreadClosure {
306 uint _worker_id;
307 public:
308 PSThreadRootsTaskClosure(uint worker_id) : _worker_id(worker_id) { }
309 virtual void do_thread(Thread* thread) {
310 assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
311
312 PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(_worker_id);
313 PSScavengeRootsClosure roots_closure(pm);
314 MarkingCodeBlobClosure roots_in_blobs(&roots_closure, CodeBlobToOopClosure::FixRelocations);
315
316 thread->oops_do(&roots_closure, &roots_in_blobs);
317
318 // Do the real work
319 pm->drain_stacks(false);
320 }
321 };
322 //
323 // OldToYoungRootsTask
324 //
325 // This task is used to scan old to young roots in parallel
326 //
327 // A GC thread executing this tasks divides the generation (old gen)
328 // into slices and takes a stripe in the slice as its part of the
329 // work.
330 //
331 // +===============+ slice 0
332 // | stripe 0 |
333 // +---------------+
334 // | stripe 1 |
335 // +---------------+
336 // | stripe 2 |
337 // +---------------+
338 // | stripe 3 |
339 // +===============+ slice 1
340 // | stripe 0 |
341 // +---------------+
342 // | stripe 1 |
343 // +---------------+
344 // | stripe 2 |
345 // +---------------+
346 // | stripe 3 |
347 // +===============+ slice 2
348 // ...
349 //
350 // A task is created for each stripe. In this case there are 4 tasks
351 // created. A GC thread first works on its stripe within slice 0
352 // and then moves to its stripe in the next slice until all stripes
353 // exceed the top of the generation. Note that having fewer GC threads
354 // than stripes works because all the tasks are executed so all stripes
355 // will be covered. In this example if 4 tasks have been created to cover
356 // all the stripes and there are only 3 threads, one of the threads will
357 // get the tasks with the 4th stripe. However, there is a dependence in
358 // PSCardTable::scavenge_contents_parallel() on the number
359 // of tasks created. In scavenge_contents_parallel the distance
360 // to the next stripe is calculated based on the number of tasks.
361 // If the stripe width is ssize, a task's next stripe is at
362 // ssize * number_of_tasks (= slice_stride). In this case after
363 // finishing stripe 0 in slice 0, the thread finds the stripe 0 in slice1
364 // by adding slice_stride to the start of stripe 0 in slice 0 to get
// to the start of stripe 0 in slice 1.
366
367 class ScavengeRootsTask : public AbstractGangTask {
368 StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
369 EnumClaimer<Parallel::RootType::Value> _enum_claimer;
370 PSOldGen* _old_gen;
371 HeapWord* _gen_top;
372 uint _active_workers;
373 bool _is_empty;
374 TaskTerminator _terminator;
375
376 public:
377 ScavengeRootsTask(
378 PSOldGen* old_gen,
379 HeapWord* gen_top,
380 uint active_workers,
381 bool is_empty)
382 : AbstractGangTask("ScavengeRootsTask"),
383 _strong_roots_scope(active_workers),
384 _enum_claimer(Parallel::RootType::sentinel),
385 _old_gen(old_gen),
386 _gen_top(gen_top),
387 _active_workers(active_workers),
388 _is_empty(is_empty),
389 _terminator(active_workers, PSPromotionManager::vm_thread_promotion_manager()->stack_array_depth()) {
390 }
391
392 virtual void work(uint worker_id) {
393 ResourceMark rm;
394
395 if (!_is_empty) {
396 // There are only old-to-young pointers if there are objects
397 // in the old gen.
398
399 // There are not old-to-young pointers if the old gen is empty.
400 assert(!_old_gen->object_space()->is_empty(),
401 "Should not be called is there is no work");
402 assert(_old_gen != NULL, "Sanity");
403 assert(_old_gen->object_space()->contains(_gen_top) || _gen_top == _old_gen->object_space()->top(), "Sanity");
404 assert(worker_id < ParallelGCThreads, "Sanity");
405
406 {
407 PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
408 PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
409
410 card_table->scavenge_contents_parallel(_old_gen->start_array(),
411 _old_gen->object_space(),
412 _gen_top,
413 pm,
414 worker_id,
415 _active_workers);
416
417 // Do the real work
418 pm->drain_stacks(false);
419 }
420 }
421
422 for (Parallel::RootType::Value root_type; _enum_claimer.try_claim(root_type); /* empty */) {
423 scavenge_roots_task(root_type, worker_id);
424 }
425
426 PSThreadRootsTaskClosure closure(worker_id);
427 Threads::possibly_parallel_threads_do(true /*parallel */, &closure);
428
429
430 // If active_workers can exceed 1, add a StrealTask.
431 // PSPromotionManager::drain_stacks_depth() does not fully drain its
432 // stacks and expects a StealTask to complete the draining if
433 // ParallelGCThreads is > 1.
434
435 if (_active_workers > 1) {
436 steal_task(*_terminator.terminator() , worker_id);
437 }
438 }
439 };
440
441 // This method contains no policy. You should probably
442 // be calling invoke() instead.
443 bool PSScavenge::invoke_no_policy() {
444 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
445 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
446
447 _gc_timer.register_gc_start();
448
449 TimeStamp scavenge_entry;
450 TimeStamp scavenge_midpoint;
451 TimeStamp scavenge_exit;
452
453 scavenge_entry.update();
454
455 if (GCLocker::check_active_before_gc()) {
456 return false;
|
321 save_to_space_top_before_gc();
322
323 #if COMPILER2_OR_JVMCI
324 DerivedPointerTable::clear();
325 #endif
326
327 reference_processor()->enable_discovery();
328 reference_processor()->setup_policy(false);
329
330 PreGCValues pre_gc_values(heap);
331
332 // Reset our survivor overflow.
333 set_survivor_overflow(false);
334
335 // We need to save the old top values before
336 // creating the promotion_manager. We pass the top
337 // values to the card_table, to prevent it from
338 // straying into the promotion labs.
339 HeapWord* old_top = old_gen->object_space()->top();
340
341 // Release all previously held resources
342 gc_task_manager()->release_all_resources();
343
344 // Set the number of GC threads to be used in this collection
345 gc_task_manager()->set_active_gang();
346 gc_task_manager()->task_idle_workers();
347 // Get the active number of workers here and use that value
348 // throughout the methods.
349 uint active_workers = gc_task_manager()->active_workers();
350
351 PSPromotionManager::pre_scavenge();
352
353 // We'll use the promotion manager again later.
354 PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
355 {
356 GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);
357 ParallelScavengeHeap::ParStrongRootsScope psrs;
358
359 GCTaskQueue* q = GCTaskQueue::create();
360
361 if (!old_gen->object_space()->is_empty()) {
362 // There are only old-to-young pointers if there are objects
363 // in the old gen.
364 uint stripe_total = active_workers;
365 for(uint i=0; i < stripe_total; i++) {
366 q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
367 }
368 }
369
370 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
371 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
372 // We scan the thread roots in parallel
373 PSAddThreadRootsTaskClosure cl(q);
374 Threads::java_threads_and_vm_thread_do(&cl);
375 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
376 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
377 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
378 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
379 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
380 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));
381 JVMCI_ONLY(q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmci));)
382
383 TaskTerminator terminator(active_workers,
384 (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
385 // If active_workers can exceed 1, add a StrealTask.
386 // PSPromotionManager::drain_stacks_depth() does not fully drain its
387 // stacks and expects a StealTask to complete the draining if
388 // ParallelGCThreads is > 1.
389 if (gc_task_manager()->workers() > 1) {
390 for (uint j = 0; j < active_workers; j++) {
391 q->enqueue(new StealTask(terminator.terminator()));
392 }
393 }
394
395 gc_task_manager()->execute_and_wait(q);
396 }
397
398 scavenge_midpoint.update();
399
400 // Process reference objects discovered during scavenge
401 {
402 GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
403
404 reference_processor()->setup_policy(false); // not always_clear
405 reference_processor()->set_active_mt_degree(active_workers);
406 PSKeepAliveClosure keep_alive(promotion_manager);
407 PSEvacuateFollowersClosure evac_followers(promotion_manager);
408 ReferenceProcessorStats stats;
409 ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->max_num_queues());
410 if (reference_processor()->processing_is_mt()) {
411 PSRefProcTaskExecutor task_executor;
412 stats = reference_processor()->process_discovered_references(
413 &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
414 &pt);
|
528 save_to_space_top_before_gc();
529
530 #if COMPILER2_OR_JVMCI
531 DerivedPointerTable::clear();
532 #endif
533
534 reference_processor()->enable_discovery();
535 reference_processor()->setup_policy(false);
536
537 PreGCValues pre_gc_values(heap);
538
539 // Reset our survivor overflow.
540 set_survivor_overflow(false);
541
542 // We need to save the old top values before
543 // creating the promotion_manager. We pass the top
544 // values to the card_table, to prevent it from
545 // straying into the promotion labs.
546 HeapWord* old_top = old_gen->object_space()->top();
547
548 uint active_workers = ParallelScavengeHeap::heap()->workers().update_active_workers(WorkerPolicy::calc_active_workers(
549 ParallelScavengeHeap::heap()->workers().total_workers(),
550 ParallelScavengeHeap::heap()->workers().active_workers(),
551 Threads::number_of_non_daemon_threads()));
552
553 PSPromotionManager::pre_scavenge();
554
555 // We'll use the promotion manager again later.
556 PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
557 {
558 GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);
559
560 ScavengeRootsTask task(old_gen, old_top, active_workers, old_gen->object_space()->is_empty());
561 ParallelScavengeHeap::heap()->workers().run_task(&task);
562 }
563
564 scavenge_midpoint.update();
565
566 // Process reference objects discovered during scavenge
567 {
568 GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
569
570 reference_processor()->setup_policy(false); // not always_clear
571 reference_processor()->set_active_mt_degree(active_workers);
572 PSKeepAliveClosure keep_alive(promotion_manager);
573 PSEvacuateFollowersClosure evac_followers(promotion_manager);
574 ReferenceProcessorStats stats;
575 ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->max_num_queues());
576 if (reference_processor()->processing_is_mt()) {
577 PSRefProcTaskExecutor task_executor;
578 stats = reference_processor()->process_discovered_references(
579 &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
580 &pt);
|
588
589 // Verify all old -> young cards are now precise
590 if (VerifyRememberedSets) {
591 // Precise verification will give false positives. Until this is fixed,
592 // use imprecise verification.
593 // heap->card_table()->verify_all_young_refs_precise();
594 heap->card_table()->verify_all_young_refs_imprecise();
595 }
596
597 if (log_is_enabled(Debug, gc, heap, exit)) {
598 accumulated_time()->stop();
599 }
600
601 young_gen->print_used_change(pre_gc_values.young_gen_used());
602 old_gen->print_used_change(pre_gc_values.old_gen_used());
603 MetaspaceUtils::print_metaspace_change(pre_gc_values.metadata_used());
604
605 // Track memory usage and detect low memory
606 MemoryService::track_memory_usage();
607 heap->update_counters();
608
609 gc_task_manager()->release_idle_workers();
610 }
611
612 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
613 HandleMark hm; // Discard invalid handles created during verification
614 Universe::verify("After GC");
615 }
616
617 heap->print_heap_after_gc();
618 heap->trace_heap_after_gc(&_gc_tracer);
619
620 scavenge_exit.update();
621
622 log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
623 scavenge_entry.ticks(), scavenge_midpoint.ticks(),
624 scavenge_exit.ticks());
625 gc_task_manager()->print_task_time_stamps();
626
627 #ifdef TRACESPINNING
628 ParallelTaskTerminator::print_termination_counts();
629 #endif
630
631 AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
632
633 _gc_timer.register_gc_end();
634
635 _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
636
637 return !promotion_failure_occurred;
638 }
639
640 // This method iterates over all objects in the young generation,
641 // removing all forwarding references. It then restores any preserved marks.
642 void PSScavenge::clean_up_failed_promotion() {
643 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
644 PSYoungGen* young_gen = heap->young_gen();
|
754
755 // Verify all old -> young cards are now precise
756 if (VerifyRememberedSets) {
757 // Precise verification will give false positives. Until this is fixed,
758 // use imprecise verification.
759 // heap->card_table()->verify_all_young_refs_precise();
760 heap->card_table()->verify_all_young_refs_imprecise();
761 }
762
763 if (log_is_enabled(Debug, gc, heap, exit)) {
764 accumulated_time()->stop();
765 }
766
767 young_gen->print_used_change(pre_gc_values.young_gen_used());
768 old_gen->print_used_change(pre_gc_values.old_gen_used());
769 MetaspaceUtils::print_metaspace_change(pre_gc_values.metadata_used());
770
771 // Track memory usage and detect low memory
772 MemoryService::track_memory_usage();
773 heap->update_counters();
774 }
775
776 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
777 HandleMark hm; // Discard invalid handles created during verification
778 Universe::verify("After GC");
779 }
780
781 heap->print_heap_after_gc();
782 heap->trace_heap_after_gc(&_gc_tracer);
783
784 scavenge_exit.update();
785
786 log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
787 scavenge_entry.ticks(), scavenge_midpoint.ticks(),
788 scavenge_exit.ticks());
789
790 #ifdef TRACESPINNING
791 ParallelTaskTerminator::print_termination_counts();
792 #endif
793
794 AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
795
796 _gc_timer.register_gc_end();
797
798 _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
799
800 return !promotion_failure_occurred;
801 }
802
803 // This method iterates over all objects in the young generation,
804 // removing all forwarding references. It then restores any preserved marks.
805 void PSScavenge::clean_up_failed_promotion() {
806 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
807 PSYoungGen* young_gen = heap->young_gen();
|
681 size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
682 bool result = promotion_estimate < old_gen->free_in_bytes();
683
684 log_trace(ergo)("%s scavenge: average_promoted " SIZE_FORMAT " padded_average_promoted " SIZE_FORMAT " free in old gen "
685 result ? "Do" : "Skip", (size_t) policy->average_promoted_in_bytes(),
686 (size_t) policy->padded_average_promoted_in_bytes(),
687 old_gen->free_in_bytes());
688 if (young_gen->used_in_bytes() < (size_t) policy->padded_average_promoted_in_bytes()) {
689 log_trace(ergo)(" padded_promoted_average is greater than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes()
690 }
691
692 if (result) {
693 _consecutive_skipped_scavenges = 0;
694 } else {
695 _consecutive_skipped_scavenges++;
696 if (UsePerfData) {
697 counters->update_scavenge_skipped(promoted_too_large);
698 }
699 }
700 return result;
701 }
702
703 // Used to add tasks
704 GCTaskManager* const PSScavenge::gc_task_manager() {
705 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
706 "shouldn't return NULL");
707 return ParallelScavengeHeap::gc_task_manager();
708 }
709
710 // Adaptive size policy support. When the young generation/old generation
711 // boundary moves, _young_generation_boundary must be reset
712 void PSScavenge::set_young_generation_boundary(HeapWord* v) {
713 _young_generation_boundary = v;
714 if (UseCompressedOops) {
715 _young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode((oop)v);
716 }
717 }
718
719 void PSScavenge::initialize() {
720 // Arguments must have been parsed
721
722 if (AlwaysTenure || NeverTenure) {
723 assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
724 "MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold);
725 _tenuring_threshold = MaxTenuringThreshold;
726 } else {
|
844 size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
845 bool result = promotion_estimate < old_gen->free_in_bytes();
846
847 log_trace(ergo)("%s scavenge: average_promoted " SIZE_FORMAT " padded_average_promoted " SIZE_FORMAT " free in old gen "
848 result ? "Do" : "Skip", (size_t) policy->average_promoted_in_bytes(),
849 (size_t) policy->padded_average_promoted_in_bytes(),
850 old_gen->free_in_bytes());
851 if (young_gen->used_in_bytes() < (size_t) policy->padded_average_promoted_in_bytes()) {
852 log_trace(ergo)(" padded_promoted_average is greater than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes()
853 }
854
855 if (result) {
856 _consecutive_skipped_scavenges = 0;
857 } else {
858 _consecutive_skipped_scavenges++;
859 if (UsePerfData) {
860 counters->update_scavenge_skipped(promoted_too_large);
861 }
862 }
863 return result;
864 }
865
866 // Adaptive size policy support. When the young generation/old generation
867 // boundary moves, _young_generation_boundary must be reset
868 void PSScavenge::set_young_generation_boundary(HeapWord* v) {
869 _young_generation_boundary = v;
870 if (UseCompressedOops) {
871 _young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode((oop)v);
872 }
873 }
874
875 void PSScavenge::initialize() {
876 // Arguments must have been parsed
877
878 if (AlwaysTenure || NeverTenure) {
879 assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
880 "MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold);
881 _tenuring_threshold = MaxTenuringThreshold;
882 } else {
|