#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)psMarkSweep.cpp 1.92 07/06/08 23:11:01 JVM"
#endif
/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_psMarkSweep.cpp.incl"

elapsedTimer PSMarkSweep::_accumulated_time;
unsigned int PSMarkSweep::_total_invocations = 0;
jlong PSMarkSweep::_time_of_last_gc = 0;
CollectorCounters* PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr,
                                          true,    // atomic_discovery
                                          false);  // mt_discovery
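  // Discovery is atomic and single-threaded: this serial full collector runs
  // only in the VM thread at a safepoint, so mt_discovery is not needed.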
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection.
void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  CodeCache::gc_prologue();
  Threads::gc_prologue();
  BiasedLocking::preserve_marks();

  // Capture heap size before collection for printing.
  size_t prev_used = heap->used();

  // Capture perm gen size before collection for sizing.
  size_t perm_gen_prev_used = perm_gen->used_in_bytes();

  // For PrintGCDetails
  size_t old_gen_prev_used = old_gen->used_in_bytes();
  size_t young_gen_prev_used = young_gen->used_in_bytes();

  allocate_stacks();

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
  COMPILER2_PRESENT(DerivedPointerTable::clear());

  ref_processor()->enable_discovery();
  ref_processor()->setup_policy(clear_all_softrefs);
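  // From here until process_discovered_references() runs in phase 1, the
  // marking closures discover Reference objects whose referents may be
  // collectable and queue them for policy-driven processing.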

  mark_sweep_phase1(clear_all_softrefs);

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3();

  mark_sweep_phase4();

  restore_marks();

  deallocate_stacks();

  if (ZapUnusedHeapArea) {
    // Do a complete mangle (top to end) because the usage for
    // scratch does not maintain a top pointer.
    young_gen->to_space()->mangle_unused_area_complete();
  }
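  // (Mangling is a debug-build aid: dead space is overwritten with a known
  // bad pattern so accidental use of stale heap memory is easier to catch.)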

  bool eden_empty = young_gen->eden_space()->is_empty();
  if (!eden_empty) {
    eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
  }
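  // absorb_live_data_from_eden() (defined below) may have just claimed eden's
  // live objects for the old gen by moving the generation boundary rather
  // than copying them, in which case eden is now considered empty.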

  // Update heap occupancy information which is used as
  // input to soft ref clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  bool survivors_empty = young_gen->from_space()->is_empty() &&
                         young_gen->to_space()->is_empty();
  bool young_gen_empty = eden_empty && survivors_empty;

  BarrierSet* bs = heap->barrier_set();
  if (bs->is_a(BarrierSet::ModRef)) {
    ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
    MemRegion old_mr = heap->old_gen()->reserved();
    MemRegion perm_mr = heap->perm_gen()->reserved();
    assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

            "of %d%%", GCTimeLimit);
        }
      }
      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
    }
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
    perm_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden. Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();
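  // Worked example (hypothetical numbers): with eden_used = 60M, a padded
  // average promotion of 8M, and a 64K space alignment, absorb_size =
  // align_size_up(68M, 64K) = 68M. The boundary moves only if 68M is less
  // than eden's capacity and the shrunken young gen stays above its minimum.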

  if (absorb_size >= eden_capacity) {
    return false;  // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false;  // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }
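  // The region between the old gen's top and end must remain parseable: the
  // start-array loop below iterates object by object, so any gap has to be
  // covered by filler objects.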

  // Take the live data from eden and set both top and end in the old gen to
  // eden top. (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
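  // Walk from the old fill boundary through the absorbed eden data, recording
  // each object's start so card scanning can find the first object in a card.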
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear. Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  Universe::oops_do(mark_and_push_closure());
  ReferenceProcessor::oops_do(mark_and_push_closure());
  JNIHandles::oops_do(mark_and_push_closure());  // Global (strong) JNI handles
  Threads::oops_do(mark_and_push_closure());
  ObjectSynchronizer::oops_do(mark_and_push_closure());
  FlatProfiler::oops_do(mark_and_push_closure());
  Management::oops_do(mark_and_push_closure());
  JvmtiExport::oops_do(mark_and_push_closure());
  SystemDictionary::always_strong_oops_do(mark_and_push_closure());
  vmSymbols::oops_do(mark_and_push_closure());

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    ref_processor()->process_discovered_references(
      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
  }
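  // setup_policy() picks the soft reference clearing policy: AlwaysClearPolicy
  // when clear_all_softrefs is set, otherwise a default LRU-based policy
  // (LRUMaxHeapPolicy under COMPILER2, else LRUCurrentHeapPolicy in this era).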

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots
  CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
                          purged_class);
  follow_stack();  // Flush marking stack

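  // do_unloading() above may have freed subclasses and implementors; the next
  // pass splices dead entries out of each live klass's link lists.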
  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack->is_empty(), "just drained");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(is_alive_closure());
  StringTable::unlink(is_alive_closure());

  assert(_marking_stack->is_empty(), "stack should be empty by now");
}