161 cld->oops_do(_scavenge_closure, false, /*clear_modified_oops*/true);
162
163 _scavenge_closure->set_scanned_cld(NULL);
164 }
165 }
166
167 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
168 _g(g)
169 {
170 _boundary = _g->reserved().end();
171 }
172
173 void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
174 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
175
176 void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
177 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
178
179 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
180 size_t initial_size,
181 const char* policy)
182 : Generation(rs, initial_size),
183 _preserved_marks_set(false /* in_c_heap */),
184 _promo_failure_drain_in_progress(false),
185 _should_allocate_from_space(false)
186 {
187 MemRegion cmr((HeapWord*)_virtual_space.low(),
188 (HeapWord*)_virtual_space.high());
189 GenCollectedHeap* gch = GenCollectedHeap::heap();
190
191 gch->barrier_set()->resize_covered_region(cmr);
192
193 _eden_space = new ContiguousSpace();
194 _from_space = new ContiguousSpace();
195 _to_space = new ContiguousSpace();
196
197 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
198 vm_exit_during_initialization("Could not allocate a new gen space");
199 }
200
201 // Compute the maximum eden and survivor space sizes. These sizes
202 // are computed assuming the entire reserved space is committed.
203 // These values are exported as performance counters.
204 uintx alignment = gch->collector_policy()->space_alignment();
205 uintx size = _virtual_space.reserved_size();
206 _max_survivor_size = compute_survivor_size(size, alignment);
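// Eden gets whatever is left after carving out the two (equal-sized) survivor spaces.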
207 _max_eden_size = size - (2*_max_survivor_size);
208
209 // allocate the performance counters
210 GenCollectorPolicy* gcp = gch->gen_policy();
211
212 // Generation counters -- generation 0, 3 subspaces
213 _gen_counters = new GenerationCounters("new", 0, 3,
214 gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
215 _gc_counters = new CollectorCounters(policy, 0);
216
217 _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
218 _gen_counters);
219 _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
220 _gen_counters);
221 _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
222 _gen_counters);
223
224 compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
225 update_counters();
226 _old_gen = NULL;
227 _tenuring_threshold = MaxTenuringThreshold;
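// PretenureSizeThreshold is specified in bytes; shifting by LogHeapWordSize converts it to heap words.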
228 _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
229
230 _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
231 }
232
233 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
234 bool clear_space,
235 bool mangle_space) {
236 uintx alignment =
237 GenCollectedHeap::heap()->collector_policy()->space_alignment();
238
239 // If the spaces are being cleared (only done at heap initialization
240 // currently), the survivor spaces need not be empty.
241 // Otherwise, no care is taken for used areas in the survivor spaces,
242 // so check that they are empty.
243 assert(clear_space || (to()->is_empty() && from()->is_empty()),
244 "Initialization of the survivor spaces assumes these are empty");
245
246 // Compute sizes
247 uintx size = _virtual_space.committed_size();
248 uintx survivor_size = compute_survivor_size(size, alignment);
249 uintx eden_size = size - (2*survivor_size);
250 assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
251
252 if (eden_size < minimum_eden_size) {
253 // This may happen due to 64K rounding; if so, adjust eden size back up
254 minimum_eden_size = align_up(minimum_eden_size, alignment);
255 uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
256 uintx unaligned_survivor_size =
257 align_down(maximum_survivor_size, alignment);
471
472
473 size_t DefNewGeneration::capacity() const {
474 return eden()->capacity()
475 + from()->capacity(); // to() is only used during scavenge
476 }
477
478
479 size_t DefNewGeneration::used() const {
480 return eden()->used()
481 + from()->used(); // to() is only used during scavenge
482 }
483
484
485 size_t DefNewGeneration::free() const {
486 return eden()->free()
487 + from()->free(); // to() is only used during scavenge
488 }
489
490 size_t DefNewGeneration::max_capacity() const {
491 const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
492 const size_t reserved_bytes = reserved().byte_size();
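// With the entire reserved space committed this is eden plus a single survivor space: to() never holds live data outside of a scavenge, so it is excluded.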
493 return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
494 }
495
496 size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
497 return eden()->free();
498 }
499
500 size_t DefNewGeneration::capacity_before_gc() const {
501 return eden()->capacity();
502 }
503
504 size_t DefNewGeneration::contiguous_available() const {
505 return eden()->free();
506 }
507
508
509 HeapWord* volatile* DefNewGeneration::top_addr() const { return eden()->top_addr(); }
510 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
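// These expose eden's allocation pointers; presumably used by callers (e.g. compiled-code inline allocation) that bump eden's top directly.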
511
547 result == NULL ? "NULL" : "object");
548
549 return result;
550 }
551
552 HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
553 bool is_tlab,
554 bool parallel) {
555 // We don't attempt to expand the young generation (but perhaps we should).
556 return allocate(size, is_tlab);
557 }
558
559 void DefNewGeneration::adjust_desired_tenuring_threshold() {
560 // Set the desired survivor size to TargetSurvivorRatio percent of the real survivor space (half of it with the default ratio of 50)
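// E.g., an 8M to-space on a 64-bit VM is 1M heap words; with the default TargetSurvivorRatio of 50, the desired survivor size comes out to 512K words (4M).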
561 size_t const survivor_capacity = to()->capacity() / HeapWordSize;
562 size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
563
564 _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);
565
566 if (UsePerfData) {
567 GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->gen_policy()->counters();
568 gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
569 gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
570 }
571
572 age_table()->print_age_table(_tenuring_threshold);
573 }
574
575 void DefNewGeneration::collect(bool full,
576 bool clear_all_soft_refs,
577 size_t size,
578 bool is_tlab) {
579 assert(full || size > 0, "otherwise we don't want to collect");
580
581 GenCollectedHeap* gch = GenCollectedHeap::heap();
582
583 _gc_timer->register_gc_start();
584 DefNewTracer gc_tracer;
585 gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
586
587 _old_gen = gch->old_gen();
599 init_assuming_no_promotion_failure();
600
601 GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, gch->gc_cause());
602
603 gch->trace_heap_before_gc(&gc_tracer);
604
605 // These can be shared for all code paths
606 IsAliveClosure is_alive(this);
607 ScanWeakRefClosure scan_weak_ref(this);
608
609 age_table()->clear();
610 to()->clear(SpaceDecorator::Mangle);
611 // The preserved marks should be empty at the start of the GC.
612 _preserved_marks_set.init(1);
613
614 gch->rem_set()->prepare_for_younger_refs_iterate(false);
615
616 assert(gch->no_allocs_since_save_marks(),
617 "save marks have not been newly set.");
618
619 // Not very pretty.
620 CollectorPolicy* cp = gch->collector_policy();
621
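// Roughly: the gc_barrier variant is for references discovered in old-generation objects, where keeping a referent in the young generation requires dirtying the card table; the no-barrier variant handles the remaining roots.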
622 FastScanClosure fsc_with_no_gc_barrier(this, false);
623 FastScanClosure fsc_with_gc_barrier(this, true);
624
625 CLDScanClosure cld_scan_closure(&fsc_with_no_gc_barrier,
626 gch->rem_set()->cld_rem_set()->accumulate_modified_oops());
627
628 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
629 FastEvacuateFollowersClosure evacuate_followers(gch,
630 &fsc_with_no_gc_barrier,
631 &fsc_with_gc_barrier);
632
633 assert(gch->no_allocs_since_save_marks(),
634 "save marks have not been newly set.");
635
636 {
637 // DefNew needs to run with n_threads == 0, to make sure the serial
638 // version of the card table scanning code is used.
639 // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
640 StrongRootsScope srs(0);
641
671 eden()->clear(SpaceDecorator::Mangle);
672 from()->clear(SpaceDecorator::Mangle);
673 if (ZapUnusedHeapArea) {
674 // This is now done here because of the piece-meal mangling which
675 // can check for valid mangling at intermediate points in the
676 // collection(s). When a young collection fails to collect
677 // sufficient space, resizing of the young generation can occur
678 // and redistribute the spaces in the young generation. Mangle
679 // here so that unzapped regions don't get distributed to
680 // other spaces.
681 to()->mangle_unused_area();
682 }
683 swap_spaces();
684
685 assert(to()->is_empty(), "to space should be empty now");
686
687 adjust_desired_tenuring_threshold();
688
689 // A successful scavenge should restart the GC overhead limit count, which
690 // is used for full GCs.
691 AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
692 size_policy->reset_gc_overhead_limit_count();
693 assert(!gch->incremental_collection_failed(), "Should be clear");
694 } else {
695 assert(_promo_failure_scan_stack.is_empty(), "post condition");
696 _promo_failure_scan_stack.clear(true); // Clear cached segments.
697
698 remove_forwarding_pointers();
699 log_info(gc, promotion)("Promotion failed");
700 // Add to-space to the list of spaces to compact
701 // when a promotion failure has occurred. In that
702 // case there can be live objects in to-space
703 // as a result of a partial evacuation of eden
704 // and from-space.
705 swap_spaces(); // For uniformity wrt ParNewGeneration.
706 from()->set_next_compaction_space(to());
707 gch->set_incremental_collection_failed();
708
709 // Inform the next generation that a promotion failure occurred.
710 _old_gen->promotion_failure_occurred();
711 gc_tracer.report_promotion_failed(_promotion_failed_info);
936 (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
937 !gch->incremental_collection_failed(),
938 "Twice in a row");
939 seen_incremental_collection_failed = false;
940 }
941 #endif // ASSERT
942 }
943
944 if (ZapUnusedHeapArea) {
945 eden()->check_mangled_unused_area_complete();
946 from()->check_mangled_unused_area_complete();
947 to()->check_mangled_unused_area_complete();
948 }
949
950 if (!CleanChunkPoolAsync) {
951 Chunk::clean_chunk_pool();
952 }
953
954 // update the generation and space performance counters
955 update_counters();
956 gch->gen_policy()->counters()->update_counters();
957 }
958
959 void DefNewGeneration::record_spaces_top() {
960 assert(ZapUnusedHeapArea, "Not mangling unused space");
961 eden()->set_top_for_allocations();
962 to()->set_top_for_allocations();
963 from()->set_top_for_allocations();
964 }
965
966 void DefNewGeneration::ref_processor_init() {
967 Generation::ref_processor_init();
968 }
969
970
971 void DefNewGeneration::update_counters() {
972 if (UsePerfData) {
973 _eden_counters->update_all();
974 _from_counters->update_all();
975 _to_counters->update_all();
976 _gen_counters->update_all();
161 cld->oops_do(_scavenge_closure, false, /*clear_modified_oops*/true);
162
163 _scavenge_closure->set_scanned_cld(NULL);
164 }
165 }
166
167 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
168 _g(g)
169 {
170 _boundary = _g->reserved().end();
171 }
172
173 void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
174 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
175
176 void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
177 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
178
179 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
180 size_t initial_size,
181 size_t min_size,
182 size_t max_size,
183 const char* policy)
184 : Generation(rs, initial_size),
185 _preserved_marks_set(false /* in_c_heap */),
186 _promo_failure_drain_in_progress(false),
187 _should_allocate_from_space(false)
188 {
189 MemRegion cmr((HeapWord*)_virtual_space.low(),
190 (HeapWord*)_virtual_space.high());
191 GenCollectedHeap* gch = GenCollectedHeap::heap();
192
193 gch->barrier_set()->resize_covered_region(cmr);
194
195 _eden_space = new ContiguousSpace();
196 _from_space = new ContiguousSpace();
197 _to_space = new ContiguousSpace();
198
199 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
200 vm_exit_during_initialization("Could not allocate a new gen space");
201 }
202
203 // Compute the maximum eden and survivor space sizes. These sizes
204 // are computed assuming the entire reserved space is committed.
205 // These values are exported as performance counters.
206 uintx alignment = gch->space_alignment();
207 uintx size = _virtual_space.reserved_size();
208 _max_survivor_size = compute_survivor_size(size, alignment);
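// Eden gets whatever is left after carving out the two (equal-sized) survivor spaces.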
209 _max_eden_size = size - (2*_max_survivor_size);
210
211 // allocate the performance counters
212
213 // Generation counters -- generation 0, 3 subspaces
214 _gen_counters = new GenerationCounters("new", 0, 3,
215 min_size, max_size, &_virtual_space);
216 _gc_counters = new CollectorCounters(policy, 0);
217
218 _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
219 _gen_counters);
220 _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
221 _gen_counters);
222 _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
223 _gen_counters);
224
225 compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
226 update_counters();
227 _old_gen = NULL;
228 _tenuring_threshold = MaxTenuringThreshold;
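// PretenureSizeThreshold is specified in bytes; shifting by LogHeapWordSize converts it to heap words.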
229 _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
230
231 _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
232 }
233
234 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
235 bool clear_space,
236 bool mangle_space) {
237 uintx alignment =
238 GenCollectedHeap::heap()->space_alignment();
239
240 // If the spaces are being cleared (only done at heap initialization
241 // currently), the survivor spaces need not be empty.
242 // Otherwise, no care is taken for used areas in the survivor spaces,
243 // so check that they are empty.
244 assert(clear_space || (to()->is_empty() && from()->is_empty()),
245 "Initialization of the survivor spaces assumes these are empty");
246
247 // Compute sizes
248 uintx size = _virtual_space.committed_size();
249 uintx survivor_size = compute_survivor_size(size, alignment);
250 uintx eden_size = size - (2*survivor_size);
251 assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
252
253 if (eden_size < minimum_eden_size) {
254 // This may happen due to 64K rounding; if so, adjust eden size back up
255 minimum_eden_size = align_up(minimum_eden_size, alignment);
256 uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
257 uintx unaligned_survivor_size =
258 align_down(maximum_survivor_size, alignment);
472
473
474 size_t DefNewGeneration::capacity() const {
475 return eden()->capacity()
476 + from()->capacity(); // to() is only used during scavenge
477 }
478
479
480 size_t DefNewGeneration::used() const {
481 return eden()->used()
482 + from()->used(); // to() is only used during scavenge
483 }
484
485
486 size_t DefNewGeneration::free() const {
487 return eden()->free()
488 + from()->free(); // to() is only used during scavenge
489 }
490
491 size_t DefNewGeneration::max_capacity() const {
492 const size_t alignment = GenCollectedHeap::heap()->space_alignment();
493 const size_t reserved_bytes = reserved().byte_size();
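// With the entire reserved space committed this is eden plus a single survivor space: to() never holds live data outside of a scavenge, so it is excluded.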
494 return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
495 }
496
497 size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
498 return eden()->free();
499 }
500
501 size_t DefNewGeneration::capacity_before_gc() const {
502 return eden()->capacity();
503 }
504
505 size_t DefNewGeneration::contiguous_available() const {
506 return eden()->free();
507 }
508
509
510 HeapWord* volatile* DefNewGeneration::top_addr() const { return eden()->top_addr(); }
511 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
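// These expose eden's allocation pointers; presumably used by callers (e.g. compiled-code inline allocation) that bump eden's top directly.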
512
548 result == NULL ? "NULL" : "object");
549
550 return result;
551 }
552
553 HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
554 bool is_tlab,
555 bool parallel) {
556 // We don't attempt to expand the young generation (but perhaps we should).
557 return allocate(size, is_tlab);
558 }
559
560 void DefNewGeneration::adjust_desired_tenuring_threshold() {
561 // Set the desired survivor size to TargetSurvivorRatio percent of the real survivor space (half of it with the default ratio of 50)
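// E.g., an 8M to-space on a 64-bit VM is 1M heap words; with the default TargetSurvivorRatio of 50, the desired survivor size comes out to 512K words (4M).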
562 size_t const survivor_capacity = to()->capacity() / HeapWordSize;
563 size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
564
565 _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);
566
567 if (UsePerfData) {
568 GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->counters();
569 gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
570 gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
571 }
572
573 age_table()->print_age_table(_tenuring_threshold);
574 }
575
576 void DefNewGeneration::collect(bool full,
577 bool clear_all_soft_refs,
578 size_t size,
579 bool is_tlab) {
580 assert(full || size > 0, "otherwise we don't want to collect");
581
582 GenCollectedHeap* gch = GenCollectedHeap::heap();
583
584 _gc_timer->register_gc_start();
585 DefNewTracer gc_tracer;
586 gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
587
588 _old_gen = gch->old_gen();
600 init_assuming_no_promotion_failure();
601
602 GCTraceTime(Trace, gc, phases) tm("DefNew", NULL, gch->gc_cause());
603
604 gch->trace_heap_before_gc(&gc_tracer);
605
606 // These can be shared for all code paths
607 IsAliveClosure is_alive(this);
608 ScanWeakRefClosure scan_weak_ref(this);
609
610 age_table()->clear();
611 to()->clear(SpaceDecorator::Mangle);
612 // The preserved marks should be empty at the start of the GC.
613 _preserved_marks_set.init(1);
614
615 gch->rem_set()->prepare_for_younger_refs_iterate(false);
616
617 assert(gch->no_allocs_since_save_marks(),
618 "save marks have not been newly set.");
619
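// Roughly: the gc_barrier variant is for references discovered in old-generation objects, where keeping a referent in the young generation requires dirtying the card table; the no-barrier variant handles the remaining roots.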
620 FastScanClosure fsc_with_no_gc_barrier(this, false);
621 FastScanClosure fsc_with_gc_barrier(this, true);
622
623 CLDScanClosure cld_scan_closure(&fsc_with_no_gc_barrier,
624 gch->rem_set()->cld_rem_set()->accumulate_modified_oops());
625
626 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
627 FastEvacuateFollowersClosure evacuate_followers(gch,
628 &fsc_with_no_gc_barrier,
629 &fsc_with_gc_barrier);
630
631 assert(gch->no_allocs_since_save_marks(),
632 "save marks have not been newly set.");
633
634 {
635 // DefNew needs to run with n_threads == 0, to make sure the serial
636 // version of the card table scanning code is used.
637 // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
638 StrongRootsScope srs(0);
639
669 eden()->clear(SpaceDecorator::Mangle);
670 from()->clear(SpaceDecorator::Mangle);
671 if (ZapUnusedHeapArea) {
672 // This is now done here because of the piece-meal mangling which
673 // can check for valid mangling at intermediate points in the
674 // collection(s). When a young collection fails to collect
675 // sufficient space, resizing of the young generation can occur
676 // and redistribute the spaces in the young generation. Mangle
677 // here so that unzapped regions don't get distributed to
678 // other spaces.
679 to()->mangle_unused_area();
680 }
681 swap_spaces();
682
683 assert(to()->is_empty(), "to space should be empty now");
684
685 adjust_desired_tenuring_threshold();
686
687 // A successful scavenge should restart the GC overhead limit count, which
688 // is used for full GCs.
689 AdaptiveSizePolicy* size_policy = gch->size_policy();
690 size_policy->reset_gc_overhead_limit_count();
691 assert(!gch->incremental_collection_failed(), "Should be clear");
692 } else {
693 assert(_promo_failure_scan_stack.is_empty(), "post condition");
694 _promo_failure_scan_stack.clear(true); // Clear cached segments.
695
696 remove_forwarding_pointers();
697 log_info(gc, promotion)("Promotion failed");
698 // Add to-space to the list of spaces to compact
699 // when a promotion failure has occurred. In that
700 // case there can be live objects in to-space
701 // as a result of a partial evacuation of eden
702 // and from-space.
703 swap_spaces(); // For uniformity wrt ParNewGeneration.
704 from()->set_next_compaction_space(to());
705 gch->set_incremental_collection_failed();
706
707 // Inform the next generation that a promotion failure occurred.
708 _old_gen->promotion_failure_occurred();
709 gc_tracer.report_promotion_failed(_promotion_failed_info);
934 (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
935 !gch->incremental_collection_failed(),
936 "Twice in a row");
937 seen_incremental_collection_failed = false;
938 }
939 #endif // ASSERT
940 }
941
942 if (ZapUnusedHeapArea) {
943 eden()->check_mangled_unused_area_complete();
944 from()->check_mangled_unused_area_complete();
945 to()->check_mangled_unused_area_complete();
946 }
947
948 if (!CleanChunkPoolAsync) {
949 Chunk::clean_chunk_pool();
950 }
951
952 // update the generation and space performance counters
953 update_counters();
954 gch->counters()->update_counters();
955 }
956
957 void DefNewGeneration::record_spaces_top() {
958 assert(ZapUnusedHeapArea, "Not mangling unused space");
959 eden()->set_top_for_allocations();
960 to()->set_top_for_allocations();
961 from()->set_top_for_allocations();
962 }
963
964 void DefNewGeneration::ref_processor_init() {
965 Generation::ref_processor_init();
966 }
967
968
969 void DefNewGeneration::update_counters() {
970 if (UsePerfData) {
971 _eden_counters->update_all();
972 _from_counters->update_all();
973 _to_counters->update_all();
974 _gen_counters->update_all();