rev 2691 : [mq]: g1-reference-processing
--- old/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
+++ new/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
1 1 /*
2 2 * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "classfile/symbolTable.hpp"
27 27 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
28 28 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
29 29 #include "gc_implementation/parallelScavenge/generationSizer.hpp"
30 30 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
31 31 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
32 32 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
33 33 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
34 34 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
35 35 #include "gc_implementation/parallelScavenge/psTasks.hpp"
36 36 #include "gc_implementation/shared/isGCActiveMark.hpp"
37 37 #include "gc_implementation/shared/spaceDecorator.hpp"
38 38 #include "gc_interface/gcCause.hpp"
39 39 #include "memory/collectorPolicy.hpp"
40 40 #include "memory/gcLocker.inline.hpp"
41 41 #include "memory/referencePolicy.hpp"
42 42 #include "memory/referenceProcessor.hpp"
43 43 #include "memory/resourceArea.hpp"
44 44 #include "oops/oop.inline.hpp"
45 45 #include "oops/oop.psgc.inline.hpp"
46 46 #include "runtime/biasedLocking.hpp"
47 47 #include "runtime/fprofiler.hpp"
48 48 #include "runtime/handles.inline.hpp"
49 49 #include "runtime/threadCritical.hpp"
50 50 #include "runtime/vmThread.hpp"
51 51 #include "runtime/vm_operations.hpp"
52 52 #include "services/memoryService.hpp"
53 53 #include "utilities/stack.inline.hpp"
54 54
55 55
56 56 HeapWord* PSScavenge::_to_space_top_before_gc = NULL;
57 57 int PSScavenge::_consecutive_skipped_scavenges = 0;
58 58 ReferenceProcessor* PSScavenge::_ref_processor = NULL;
59 59 CardTableExtension* PSScavenge::_card_table = NULL;
60 60 bool PSScavenge::_survivor_overflow = false;
61 61 int PSScavenge::_tenuring_threshold = 0;
62 62 HeapWord* PSScavenge::_young_generation_boundary = NULL;
63 63 elapsedTimer PSScavenge::_accumulated_time;
64 64 Stack<markOop> PSScavenge::_preserved_mark_stack;
65 65 Stack<oop> PSScavenge::_preserved_oop_stack;
66 66 CollectorCounters* PSScavenge::_counters = NULL;
67 67 bool PSScavenge::_promotion_failed = false;
68 68
69 69 // Define before use
70 70 class PSIsAliveClosure: public BoolObjectClosure {
71 71 public:
72 72 void do_object(oop p) {
73 73 assert(false, "Do not call.");
74 74 }
75 75 bool do_object_b(oop p) {
76 76 return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
77 77 }
78 78 };
79 79
80 80 PSIsAliveClosure PSScavenge::_is_alive_closure;
81 81
82 82 class PSKeepAliveClosure: public OopClosure {
83 83 protected:
84 84 MutableSpace* _to_space;
85 85 PSPromotionManager* _promotion_manager;
86 86
87 87 public:
88 88 PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
89 89 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
90 90 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
91 91 _to_space = heap->young_gen()->to_space();
92 92
93 93 assert(_promotion_manager != NULL, "Sanity");
94 94 }
95 95
96 96 template <class T> void do_oop_work(T* p) {
97 97 assert (!oopDesc::is_null(*p), "expected non-null ref");
98 98 assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
99 99 "expected an oop while scanning weak refs");
100 100
101 101 // Weak refs may be visited more than once.
102 102 if (PSScavenge::should_scavenge(p, _to_space)) {
103 103 PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
104 104 }
105 105 }
106 106 virtual void do_oop(oop* p) { PSKeepAliveClosure::do_oop_work(p); }
107 107 virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
108 108 };
109 109
110 110 class PSEvacuateFollowersClosure: public VoidClosure {
111 111 private:
112 112 PSPromotionManager* _promotion_manager;
113 113 public:
114 114 PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}
115 115
116 116 virtual void do_void() {
117 117 assert(_promotion_manager != NULL, "Sanity");
118 118 _promotion_manager->drain_stacks(true);
119 119 guarantee(_promotion_manager->stacks_empty(),
120 120 "stacks should be empty at this point");
121 121 }
122 122 };
123 123
124 124 class PSPromotionFailedClosure : public ObjectClosure {
125 125 virtual void do_object(oop obj) {
126 126 if (obj->is_forwarded()) {
127 127 obj->init_mark();
128 128 }
129 129 }
130 130 };
131 131
132 132 class PSRefProcTaskProxy: public GCTask {
133 133 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
134 134 ProcessTask & _rp_task;
135 135 uint _work_id;
136 136 public:
137 137 PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
138 138 : _rp_task(rp_task),
139 139 _work_id(work_id)
140 140 { }
141 141
142 142 private:
143 143 virtual char* name() { return (char *)"Process referents by policy in parallel"; }
144 144 virtual void do_it(GCTaskManager* manager, uint which);
145 145 };
146 146
147 147 void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
148 148 {
149 149 PSPromotionManager* promotion_manager =
150 150 PSPromotionManager::gc_thread_promotion_manager(which);
151 151 assert(promotion_manager != NULL, "sanity check");
152 152 PSKeepAliveClosure keep_alive(promotion_manager);
153 153 PSEvacuateFollowersClosure evac_followers(promotion_manager);
154 154 PSIsAliveClosure is_alive;
155 155 _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
156 156 }
157 157
158 158 class PSRefEnqueueTaskProxy: public GCTask {
159 159 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
160 160 EnqueueTask& _enq_task;
161 161 uint _work_id;
162 162
163 163 public:
164 164 PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
165 165 : _enq_task(enq_task),
166 166 _work_id(work_id)
167 167 { }
168 168
169 169 virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
170 170 virtual void do_it(GCTaskManager* manager, uint which)
171 171 {
172 172 _enq_task.work(_work_id);
173 173 }
174 174 };
175 175
176 176 class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
177 177 virtual void execute(ProcessTask& task);
178 178 virtual void execute(EnqueueTask& task);
179 179 };
180 180
181 181 void PSRefProcTaskExecutor::execute(ProcessTask& task)
182 182 {
183 183 GCTaskQueue* q = GCTaskQueue::create();
184 184 for(uint i=0; i<ParallelGCThreads; i++) {
185 185 q->enqueue(new PSRefProcTaskProxy(task, i));
186 186 }
187 187 ParallelTaskTerminator terminator(
188 188 ParallelScavengeHeap::gc_task_manager()->workers(),
189 189 (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
190 190 if (task.marks_oops_alive() && ParallelGCThreads > 1) {
191 191 for (uint j=0; j<ParallelGCThreads; j++) {
192 192 q->enqueue(new StealTask(&terminator));
193 193 }
194 194 }
195 195 ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
196 196 }
197 197
198 198
199 199 void PSRefProcTaskExecutor::execute(EnqueueTask& task)
200 200 {
201 201 GCTaskQueue* q = GCTaskQueue::create();
202 202 for(uint i=0; i<ParallelGCThreads; i++) {
203 203 q->enqueue(new PSRefEnqueueTaskProxy(task, i));
204 204 }
205 205 ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
206 206 }
207 207
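Reviewer note (sketch, not part of the change): the two execute() overloads above fan a ProcessTask or EnqueueTask out as one PSRefProcTaskProxy/PSRefEnqueueTaskProxy GCTask per GC worker, adding StealTask work stealing when the process task marks oops alive. The executor itself is only consumed by the reference processor; its call sites appear later in this file and are condensed here purely to make the proxies' purpose concrete:

    // Condensed from the reference-processing block in PSScavenge::invoke_no_policy() below.
    PSRefProcTaskExecutor task_executor;
    reference_processor()->process_discovered_references(
        &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
    reference_processor()->enqueue_discovered_references(&task_executor);
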
208 208 // This method contains all heap specific policy for invoking scavenge.
209 209 // PSScavenge::invoke_no_policy() will do nothing but attempt to
210 210 // scavenge. It will not clean up after failed promotions, bail out if
 211  211 // we've exceeded policy time limits, or perform any other special behavior.
212 212 // All such policy should be placed here.
213 213 //
214 214 // Note that this method should only be called from the vm_thread while
215 215 // at a safepoint!
216 216 void PSScavenge::invoke() {
217 217 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
218 218 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
219 219 assert(!Universe::heap()->is_gc_active(), "not reentrant");
220 220
221 221 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
222 222 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
223 223
224 224 PSAdaptiveSizePolicy* policy = heap->size_policy();
225 225 IsGCActiveMark mark;
226 226
227 227 bool scavenge_was_done = PSScavenge::invoke_no_policy();
228 228
229 229 PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
230 230 if (UsePerfData)
231 231 counters->update_full_follows_scavenge(0);
232 232 if (!scavenge_was_done ||
233 233 policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
234 234 if (UsePerfData)
235 235 counters->update_full_follows_scavenge(full_follows_scavenge);
236 236 GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
237 237 CollectorPolicy* cp = heap->collector_policy();
238 238 const bool clear_all_softrefs = cp->should_clear_all_soft_refs();
239 239
240 240 if (UseParallelOldGC) {
241 241 PSParallelCompact::invoke_no_policy(clear_all_softrefs);
242 242 } else {
243 243 PSMarkSweep::invoke_no_policy(clear_all_softrefs);
244 244 }
245 245 }
246 246 }
247 247
248 248 // This method contains no policy. You should probably
249 249 // be calling invoke() instead.
250 250 bool PSScavenge::invoke_no_policy() {
251 251 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
252 252 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
253 253
254 254 assert(_preserved_mark_stack.is_empty(), "should be empty");
255 255 assert(_preserved_oop_stack.is_empty(), "should be empty");
256 256
257 257 TimeStamp scavenge_entry;
258 258 TimeStamp scavenge_midpoint;
259 259 TimeStamp scavenge_exit;
260 260
261 261 scavenge_entry.update();
262 262
263 263 if (GC_locker::check_active_before_gc()) {
264 264 return false;
265 265 }
266 266
267 267 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
268 268 GCCause::Cause gc_cause = heap->gc_cause();
269 269 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
270 270
271 271 // Check for potential problems.
272 272 if (!should_attempt_scavenge()) {
273 273 return false;
274 274 }
275 275
276 276 bool promotion_failure_occurred = false;
277 277
278 278 PSYoungGen* young_gen = heap->young_gen();
279 279 PSOldGen* old_gen = heap->old_gen();
280 280 PSPermGen* perm_gen = heap->perm_gen();
281 281 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
282 282 heap->increment_total_collections();
283 283
284 284 AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
285 285
286 286 if ((gc_cause != GCCause::_java_lang_system_gc) ||
287 287 UseAdaptiveSizePolicyWithSystemGC) {
288 288 // Gather the feedback data for eden occupancy.
289 289 young_gen->eden_space()->accumulate_statistics();
290 290 }
291 291
292 292 if (ZapUnusedHeapArea) {
293 293 // Save information needed to minimize mangling
294 294 heap->record_gen_tops_before_GC();
295 295 }
296 296
297 297 if (PrintHeapAtGC) {
298 298 Universe::print_heap_before_gc();
299 299 }
300 300
301 301 assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
302 302 assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
303 303
304 304 size_t prev_used = heap->used();
305 305 assert(promotion_failed() == false, "Sanity");
306 306
307 307 // Fill in TLABs
308 308 heap->accumulate_statistics_all_tlabs();
309 309 heap->ensure_parsability(true); // retire TLABs
310 310
311 311 if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
312 312 HandleMark hm; // Discard invalid handles created during verification
313 313 gclog_or_tty->print(" VerifyBeforeGC:");
314 314 Universe::verify(true);
315 315 }
316 316
317 317 {
318 318 ResourceMark rm;
319 319 HandleMark hm;
320 320
321 321 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
322 322 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
323 323 TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
324 324 TraceCollectorStats tcs(counters());
325 325 TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);
326 326
327 327 if (TraceGen0Time) accumulated_time()->start();
328 328
329 329 // Let the size policy know we're starting
330 330 size_policy->minor_collection_begin();
331 331
332 332 // Verify the object start arrays.
333 333 if (VerifyObjectStartArray &&
334 334 VerifyBeforeGC) {
335 335 old_gen->verify_object_start_array();
336 336 perm_gen->verify_object_start_array();
337 337 }
338 338
339 339 // Verify no unmarked old->young roots
340 340 if (VerifyRememberedSets) {
341 341 CardTableExtension::verify_all_young_refs_imprecise();
342 342 }
343 343
344 344 if (!ScavengeWithObjectsInToSpace) {
345 345 assert(young_gen->to_space()->is_empty(),
346 346 "Attempt to scavenge with live objects in to_space");
347 347 young_gen->to_space()->clear(SpaceDecorator::Mangle);
348 348 } else if (ZapUnusedHeapArea) {
349 349 young_gen->to_space()->mangle_unused_area();
350 350 }
351 351 save_to_space_top_before_gc();
352 352
353 - NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
354 353 COMPILER2_PRESENT(DerivedPointerTable::clear());
355 354
356 - reference_processor()->enable_discovery();
355 + reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
357 356 reference_processor()->setup_policy(false);
358 357
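Reviewer note (sketch, not part of the webrev): the removed NOT_PRODUCT(reference_processor()->verify_no_references_recorded()) call and the old no-argument enable_discovery() are folded into a single call whose flags ask the ReferenceProcessor to do the verification itself. The updated entry point lives in referenceProcessor.hpp/.cpp, which are not shown here; a minimal sketch of what it could look like, assuming the parameter names from the call-site comments and the existing _discovering_refs field and verify_no_references_recorded() helper:

    void ReferenceProcessor::enable_discovery(bool verify_disabled, bool verify_no_refs) {
      // Sketch only: verify_disabled guards an assert that discovery is currently off;
      // verify_no_refs re-runs the check the caller previously did via NOT_PRODUCT(...).
    #ifdef ASSERT
      if (verify_disabled) {
        assert(!_discovering_refs, "discovery should be off before it is enabled");
      }
    #endif
    #ifndef PRODUCT
      if (verify_no_refs) {
        verify_no_references_recorded();
      }
    #endif
      _discovering_refs = true;
    }
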
359 358 // We track how much was promoted to the next generation for
360 359 // the AdaptiveSizePolicy.
361 360 size_t old_gen_used_before = old_gen->used_in_bytes();
362 361
363 362 // For PrintGCDetails
364 363 size_t young_gen_used_before = young_gen->used_in_bytes();
365 364
366 365 // Reset our survivor overflow.
367 366 set_survivor_overflow(false);
368 367
369 368 // We need to save the old/perm top values before
370 369 // creating the promotion_manager. We pass the top
371 370 // values to the card_table, to prevent it from
372 371 // straying into the promotion labs.
373 372 HeapWord* old_top = old_gen->object_space()->top();
374 373 HeapWord* perm_top = perm_gen->object_space()->top();
375 374
376 375 // Release all previously held resources
377 376 gc_task_manager()->release_all_resources();
378 377
379 378 PSPromotionManager::pre_scavenge();
380 379
381 380 // We'll use the promotion manager again later.
382 381 PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
383 382 {
384 383 // TraceTime("Roots");
385 384 ParallelScavengeHeap::ParStrongRootsScope psrs;
386 385
387 386 GCTaskQueue* q = GCTaskQueue::create();
388 387
389 388 for(uint i=0; i<ParallelGCThreads; i++) {
390 389 q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
391 390 }
392 391
393 392 q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));
394 393
395 394 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
396 395 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
397 396 // We scan the thread roots in parallel
398 397 Threads::create_thread_roots_tasks(q);
399 398 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
400 399 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
401 400 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
402 401 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
403 402 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
404 403 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));
405 404
406 405 ParallelTaskTerminator terminator(
407 406 gc_task_manager()->workers(),
408 407 (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
409 408 if (ParallelGCThreads>1) {
410 409 for (uint j=0; j<ParallelGCThreads; j++) {
411 410 q->enqueue(new StealTask(&terminator));
412 411 }
413 412 }
414 413
415 414 gc_task_manager()->execute_and_wait(q);
416 415 }
417 416
418 417 scavenge_midpoint.update();
419 418
420 419 // Process reference objects discovered during scavenge
421 420 {
422 421 reference_processor()->setup_policy(false); // not always_clear
423 422 PSKeepAliveClosure keep_alive(promotion_manager);
424 423 PSEvacuateFollowersClosure evac_followers(promotion_manager);
425 424 if (reference_processor()->processing_is_mt()) {
426 425 PSRefProcTaskExecutor task_executor;
427 426 reference_processor()->process_discovered_references(
428 427 &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
429 428 } else {
430 429 reference_processor()->process_discovered_references(
431 430 &_is_alive_closure, &keep_alive, &evac_followers, NULL);
432 431 }
433 432 }
434 433
435 434 // Enqueue reference objects discovered during scavenge.
436 435 if (reference_processor()->processing_is_mt()) {
437 436 PSRefProcTaskExecutor task_executor;
438 437 reference_processor()->enqueue_discovered_references(&task_executor);
439 438 } else {
440 439 reference_processor()->enqueue_discovered_references(NULL);
441 440 }
442 441
443 442 if (!JavaObjectsInPerm) {
444 443 // Unlink any dead interned Strings
445 444 StringTable::unlink(&_is_alive_closure);
446 445 // Process the remaining live ones
447 446 PSScavengeRootsClosure root_closure(promotion_manager);
448 447 StringTable::oops_do(&root_closure);
449 448 }
450 449
451 450 // Finally, flush the promotion_manager's labs, and deallocate its stacks.
452 451 PSPromotionManager::post_scavenge();
453 452
454 453 promotion_failure_occurred = promotion_failed();
455 454 if (promotion_failure_occurred) {
456 455 clean_up_failed_promotion();
457 456 if (PrintGC) {
458 457 gclog_or_tty->print("--");
459 458 }
460 459 }
461 460
462 461 // Let the size policy know we're done. Note that we count promotion
463 462 // failure cleanup time as part of the collection (otherwise, we're
464 463 // implicitly saying it's mutator time).
465 464 size_policy->minor_collection_end(gc_cause);
466 465
467 466 if (!promotion_failure_occurred) {
468 467 // Swap the survivor spaces.
469 468
470 469
471 470 young_gen->eden_space()->clear(SpaceDecorator::Mangle);
472 471 young_gen->from_space()->clear(SpaceDecorator::Mangle);
473 472 young_gen->swap_spaces();
474 473
475 474 size_t survived = young_gen->from_space()->used_in_bytes();
476 475 size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
477 476 size_policy->update_averages(_survivor_overflow, survived, promoted);
478 477
479 478 // A successful scavenge should restart the GC time limit count which is
480 479 // for full GC's.
481 480 size_policy->reset_gc_overhead_limit_count();
482 481 if (UseAdaptiveSizePolicy) {
483 482 // Calculate the new survivor size and tenuring threshold
484 483
485 484 if (PrintAdaptiveSizePolicy) {
486 485 gclog_or_tty->print("AdaptiveSizeStart: ");
487 486 gclog_or_tty->stamp();
488 487 gclog_or_tty->print_cr(" collection: %d ",
489 488 heap->total_collections());
490 489
491 490 if (Verbose) {
492 491 gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
493 492 " perm_gen_capacity: %d ",
494 493 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
495 494 perm_gen->capacity_in_bytes());
496 495 }
497 496 }
498 497
499 498
500 499 if (UsePerfData) {
501 500 PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
502 501 counters->update_old_eden_size(
503 502 size_policy->calculated_eden_size_in_bytes());
504 503 counters->update_old_promo_size(
505 504 size_policy->calculated_promo_size_in_bytes());
506 505 counters->update_old_capacity(old_gen->capacity_in_bytes());
507 506 counters->update_young_capacity(young_gen->capacity_in_bytes());
508 507 counters->update_survived(survived);
509 508 counters->update_promoted(promoted);
510 509 counters->update_survivor_overflowed(_survivor_overflow);
511 510 }
512 511
513 512 size_t survivor_limit =
514 513 size_policy->max_survivor_size(young_gen->max_size());
515 514 _tenuring_threshold =
516 515 size_policy->compute_survivor_space_size_and_threshold(
517 516 _survivor_overflow,
518 517 _tenuring_threshold,
519 518 survivor_limit);
520 519
521 520 if (PrintTenuringDistribution) {
522 521 gclog_or_tty->cr();
523 522 gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
524 523 size_policy->calculated_survivor_size_in_bytes(),
525 524 _tenuring_threshold, MaxTenuringThreshold);
526 525 }
527 526
528 527 if (UsePerfData) {
529 528 PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
530 529 counters->update_tenuring_threshold(_tenuring_threshold);
531 530 counters->update_survivor_size_counters();
532 531 }
533 532
534 533 // Do call at minor collections?
535 534 // Don't check if the size_policy is ready at this
536 535 // level. Let the size_policy check that internally.
537 536 if (UseAdaptiveSizePolicy &&
538 537 UseAdaptiveGenerationSizePolicyAtMinorCollection &&
539 538 ((gc_cause != GCCause::_java_lang_system_gc) ||
540 539 UseAdaptiveSizePolicyWithSystemGC)) {
541 540
 542  541         // Calculate optimal free space amounts
543 542 assert(young_gen->max_size() >
544 543 young_gen->from_space()->capacity_in_bytes() +
545 544 young_gen->to_space()->capacity_in_bytes(),
546 545 "Sizes of space in young gen are out-of-bounds");
547 546 size_t max_eden_size = young_gen->max_size() -
548 547 young_gen->from_space()->capacity_in_bytes() -
549 548 young_gen->to_space()->capacity_in_bytes();
550 549 size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
551 550 young_gen->eden_space()->used_in_bytes(),
552 551 old_gen->used_in_bytes(),
553 552 perm_gen->used_in_bytes(),
554 553 young_gen->eden_space()->capacity_in_bytes(),
555 554 old_gen->max_gen_size(),
556 555 max_eden_size,
557 556 false /* full gc*/,
558 557 gc_cause,
559 558 heap->collector_policy());
560 559
561 560 }
562 561 // Resize the young generation at every collection
563 562 // even if new sizes have not been calculated. This is
564 563 // to allow resizes that may have been inhibited by the
565 564 // relative location of the "to" and "from" spaces.
566 565
567 566 // Resizing the old gen at minor collects can cause increases
568 567 // that don't feed back to the generation sizing policy until
569 568 // a major collection. Don't resize the old gen here.
570 569
571 570 heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
572 571 size_policy->calculated_survivor_size_in_bytes());
573 572
574 573 if (PrintAdaptiveSizePolicy) {
575 574 gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
576 575 heap->total_collections());
577 576 }
578 577 }
579 578
 580  579     // Update the structure of the eden. With NUMA-eden, CPU hotplugging or offlining can
 581  580     // change the heap layout. Make sure eden is reshaped if that's the case.
 582  581     // Also, update() will cause adaptive NUMA chunk resizing.
583 582 assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
584 583 young_gen->eden_space()->update();
585 584
586 585 heap->gc_policy_counters()->update_counters();
587 586
588 587 heap->resize_all_tlabs();
589 588
590 589 assert(young_gen->to_space()->is_empty(), "to space should be empty now");
591 590 }
592 591
593 592 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
594 593
595 594 NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
596 595
597 596 // Re-verify object start arrays
598 597 if (VerifyObjectStartArray &&
599 598 VerifyAfterGC) {
600 599 old_gen->verify_object_start_array();
601 600 perm_gen->verify_object_start_array();
602 601 }
603 602
604 603 // Verify all old -> young cards are now precise
605 604 if (VerifyRememberedSets) {
606 605 // Precise verification will give false positives. Until this is fixed,
607 606 // use imprecise verification.
608 607 // CardTableExtension::verify_all_young_refs_precise();
609 608 CardTableExtension::verify_all_young_refs_imprecise();
610 609 }
611 610
612 611 if (TraceGen0Time) accumulated_time()->stop();
613 612
614 613 if (PrintGC) {
615 614 if (PrintGCDetails) {
616 615 // Don't print a GC timestamp here. This is after the GC so
617 616 // would be confusing.
618 617 young_gen->print_used_change(young_gen_used_before);
619 618 }
620 619 heap->print_heap_change(prev_used);
621 620 }
622 621
623 622 // Track memory usage and detect low memory
624 623 MemoryService::track_memory_usage();
625 624 heap->update_counters();
626 625 }
627 626
628 627 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
629 628 HandleMark hm; // Discard invalid handles created during verification
630 629 gclog_or_tty->print(" VerifyAfterGC:");
631 630 Universe::verify(false);
632 631 }
633 632
634 633 if (PrintHeapAtGC) {
635 634 Universe::print_heap_after_gc();
636 635 }
637 636
638 637 if (ZapUnusedHeapArea) {
639 638 young_gen->eden_space()->check_mangled_unused_area_complete();
640 639 young_gen->from_space()->check_mangled_unused_area_complete();
641 640 young_gen->to_space()->check_mangled_unused_area_complete();
642 641 }
643 642
644 643 scavenge_exit.update();
645 644
646 645 if (PrintGCTaskTimeStamps) {
647 646 tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
648 647 scavenge_entry.ticks(), scavenge_midpoint.ticks(),
649 648 scavenge_exit.ticks());
650 649 gc_task_manager()->print_task_time_stamps();
651 650 }
652 651
653 652 #ifdef TRACESPINNING
654 653 ParallelTaskTerminator::print_termination_counts();
655 654 #endif
656 655
657 656 return !promotion_failure_occurred;
658 657 }
659 658
660 659 // This method iterates over all objects in the young generation,
661 660 // unforwarding markOops. It then restores any preserved mark oops,
662 661 // and clears the _preserved_mark_stack.
663 662 void PSScavenge::clean_up_failed_promotion() {
664 663 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
665 664 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
666 665 assert(promotion_failed(), "Sanity");
667 666
668 667 PSYoungGen* young_gen = heap->young_gen();
669 668
670 669 {
671 670 ResourceMark rm;
672 671
673 672 // Unforward all pointers in the young gen.
674 673 PSPromotionFailedClosure unforward_closure;
675 674 young_gen->object_iterate(&unforward_closure);
676 675
677 676 if (PrintGC && Verbose) {
678 677 gclog_or_tty->print_cr("Restoring %d marks", _preserved_oop_stack.size());
679 678 }
680 679
681 680 // Restore any saved marks.
682 681 while (!_preserved_oop_stack.is_empty()) {
683 682 oop obj = _preserved_oop_stack.pop();
684 683 markOop mark = _preserved_mark_stack.pop();
685 684 obj->set_mark(mark);
686 685 }
687 686
688 687 // Clear the preserved mark and oop stack caches.
689 688 _preserved_mark_stack.clear(true);
690 689 _preserved_oop_stack.clear(true);
691 690 _promotion_failed = false;
692 691 }
693 692
694 693 // Reset the PromotionFailureALot counters.
695 694 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
696 695 }
697 696
698 697 // This method is called whenever an attempt to promote an object
699 698 // fails. Some markOops will need preservation, some will not. Note
700 699 // that the entire eden is traversed after a failed promotion, with
701 700 // all forwarded headers replaced by the default markOop. This means
 702  701 // it is not necessary to preserve most markOops.
703 702 void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
704 703 _promotion_failed = true;
705 704 if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
 706  705     // Should use per-worker private stacks here rather than
707 706 // locking a common pair of stacks.
708 707 ThreadCritical tc;
709 708 _preserved_oop_stack.push(obj);
710 709 _preserved_mark_stack.push(obj_mark);
711 710 }
712 711 }
713 712
714 713 bool PSScavenge::should_attempt_scavenge() {
715 714 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
716 715 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
717 716 PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
718 717
719 718 if (UsePerfData) {
720 719 counters->update_scavenge_skipped(not_skipped);
721 720 }
722 721
723 722 PSYoungGen* young_gen = heap->young_gen();
724 723 PSOldGen* old_gen = heap->old_gen();
725 724
726 725 if (!ScavengeWithObjectsInToSpace) {
727 726 // Do not attempt to promote unless to_space is empty
728 727 if (!young_gen->to_space()->is_empty()) {
729 728 _consecutive_skipped_scavenges++;
730 729 if (UsePerfData) {
731 730 counters->update_scavenge_skipped(to_space_not_empty);
732 731 }
733 732 return false;
734 733 }
735 734 }
736 735
737 736 // Test to see if the scavenge will likely fail.
738 737 PSAdaptiveSizePolicy* policy = heap->size_policy();
739 738
740 739 // A similar test is done in the policy's should_full_GC(). If this is
741 740 // changed, decide if that test should also be changed.
742 741 size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
743 742 size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
744 743 bool result = promotion_estimate < old_gen->free_in_bytes();
745 744
746 745 if (PrintGCDetails && Verbose) {
747 746 gclog_or_tty->print(result ? " do scavenge: " : " skip scavenge: ");
748 747 gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
749 748 " padded_average_promoted " SIZE_FORMAT
750 749 " free in old gen " SIZE_FORMAT,
751 750 (size_t) policy->average_promoted_in_bytes(),
752 751 (size_t) policy->padded_average_promoted_in_bytes(),
753 752 old_gen->free_in_bytes());
754 753 if (young_gen->used_in_bytes() <
755 754 (size_t) policy->padded_average_promoted_in_bytes()) {
756 755 gclog_or_tty->print_cr(" padded_promoted_average is greater"
757 756 " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
758 757 }
759 758 }
760 759
761 760 if (result) {
762 761 _consecutive_skipped_scavenges = 0;
763 762 } else {
764 763 _consecutive_skipped_scavenges++;
765 764 if (UsePerfData) {
766 765 counters->update_scavenge_skipped(promoted_too_large);
767 766 }
768 767 }
769 768 return result;
770 769 }
771 770
772 771 // Used to add tasks
773 772 GCTaskManager* const PSScavenge::gc_task_manager() {
774 773 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
775 774 "shouldn't return NULL");
776 775 return ParallelScavengeHeap::gc_task_manager();
777 776 }
778 777
779 778 void PSScavenge::initialize() {
780 779 // Arguments must have been parsed
781 780
782 781 if (AlwaysTenure) {
783 782 _tenuring_threshold = 0;
784 783 } else if (NeverTenure) {
785 784 _tenuring_threshold = markOopDesc::max_age + 1;
786 785 } else {
787 786 // We want to smooth out our startup times for the AdaptiveSizePolicy
788 787 _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
789 788 MaxTenuringThreshold;
790 789 }
791 790
792 791 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
793 792 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
794 793
795 794 PSYoungGen* young_gen = heap->young_gen();
796 795 PSOldGen* old_gen = heap->old_gen();
797 796 PSPermGen* perm_gen = heap->perm_gen();
798 797
799 798 // Set boundary between young_gen and old_gen
800 799 assert(perm_gen->reserved().end() <= old_gen->object_space()->bottom(),
801 800 "perm above old");
802 801 assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
803 802 "old above young");
804 803 _young_generation_boundary = young_gen->eden_space()->bottom();
805 804
806 805 // Initialize ref handling object for scavenging.
807 806 MemRegion mr = young_gen->reserved();
808 807 _ref_processor =
809 808 new ReferenceProcessor(mr, // span
810 809 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
811 810 (int) ParallelGCThreads, // mt processing degree
812 811 true, // mt discovery
813 812 (int) ParallelGCThreads, // mt discovery degree
814 813 true, // atomic_discovery
815 814 NULL, // header provides liveness info
816 815 false); // next field updates do not need write barrier
817 816
818 817 // Cache the cardtable
819 818 BarrierSet* bs = Universe::heap()->barrier_set();
820 819 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
821 820 _card_table = (CardTableExtension*)bs;
822 821
823 822 _counters = new CollectorCounters("PSScavenge", 0);
824 823 }