void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

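// KlassScanClosure applies a scavenge closure to the oops embedded in a
// Klass; it caches at construction whether the KlassRemSet is currently
// accumulating modified oops.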
KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
                                   KlassRemSet* klass_rem_set)
    : _scavenge_closure(scavenge_closure),
      _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}

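// Construct the young (DefNew) generation: register the covered region with
// the remembered set and create the eden and survivor spaces.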
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _preserved_marks_set(false /* in_c_heap */),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  GenCollectedHeap* gch = GenCollectedHeap::heap();

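  // Let the remembered set (card table) know the address range this
  // generation covers, so card scanning stays in sync with the virtual space.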
  gch->rem_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a new gen space");
  }

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
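  // Two equal-sized survivor spaces are carved out first; eden's maximum
  // is the remainder of the reserved space.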
  uintx alignment = gch->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters
  GenCollectorPolicy* gcp = gch->gen_policy();

// ...

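// (Excerpt resumes inside DefNewGeneration::compute_new_size.)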
442 // "changed" will be false. If the expansion failed
443 // (and at this point it was expected to succeed),
444 // ignore the failure (leaving "changed" as false).
445 }
446 if (desired_new_size < new_size_before && eden()->is_empty()) {
447 // bail out of shrinking if objects in eden
448 size_t change = new_size_before - desired_new_size;
449 assert(change % alignment == 0, "just checking");
450 _virtual_space.shrink_by(change);
451 changed = true;
452 }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    gch->rem_set()->resize_covered_region(cmr);

    log_debug(gc, ergo, heap)(
        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, ergo, heap)(
        " [allowed " SIZE_FORMAT "K extra for %d threads]",
        thread_increase_size/K, threads_count);
  }
}

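// Deliberately unimplemented: DefNew is the youngest generation, so it has
// no younger generations whose references would need to be iterated.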
void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}

// ...

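  // (Excerpt resumes inside DefNewGeneration::collect.)
  // The no-barrier closure scans roots within the young generation itself;
  // the barrier variant is used for references coming from older generations,
  // which must keep the card table up to date as oops are forwarded.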
  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  {
    // DefNew needs to run with n_threads == 0, to make sure the serial
    // version of the card table scanning code is used.
    // See: CardTableRS::non_clean_card_iterate_possibly_parallel.
    StrongRootsScope srs(0);

    gch->young_process_roots(&srs,
                             &fsc_with_no_gc_barrier,
                             &fsc_with_gc_barrier,
                             &cld_scan_closure);
  }

657 // "evacuate followers".
658 evacuate_followers.do_void();
659
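  // Process the references discovered during the scavenge: keep_alive
  // evacuates referents that must survive, and evacuate_followers drains
  // anything newly copied as a result.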
  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer);
  gc_tracer.report_gc_reference_stats(stats);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());
