176
177 void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
178 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
179
180 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
181 KlassRemSet* klass_rem_set)
182 : _scavenge_closure(scavenge_closure),
183 _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
184
185
// DefNewGeneration constructor: sets up the young generation -- an eden
// space plus two survivor (from/to) spaces carved out of one reserved
// region -- along with its performance counters.
// NOTE(review): this fragment is truncated before the closing brace; the
// remainder of the constructor is not visible here.
186 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
187 size_t initial_size,
188 int level,
189 const char* policy)
190 : Generation(rs, initial_size, level),
191 _promo_failure_drain_in_progress(false),
192 _should_allocate_from_space(false)
193 {
// Tell the barrier set which address range of this generation it must
// cover (the full reserved span of the virtual space).
194 MemRegion cmr((HeapWord*)_virtual_space.low(),
195 (HeapWord*)_virtual_space.high());
196 Universe::heap()->barrier_set()->resize_covered_region(cmr);
197
// The three sub-spaces; a failed allocation here is fatal at VM startup.
198 _eden_space = new ContiguousSpace();
199 _from_space = new ContiguousSpace();
200 _to_space = new ContiguousSpace();
201
202 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
203 vm_exit_during_initialization("Could not allocate a new gen space");
204
205 // Compute the maximum eden and survivor space sizes. These sizes
206 // are computed assuming the entire reserved space is committed.
207 // These values are exported as performance counters.
208 uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
209 uintx size = _virtual_space.reserved_size();
210 _max_survivor_size = compute_survivor_size(size, alignment);
// Two survivors of _max_survivor_size each; eden gets whatever remains.
211 _max_eden_size = size - (2*_max_survivor_size);
212
213 // allocate the performance counters
214 GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();
215
216 // Generation counters -- generation 0, 3 subspaces
217 _gen_counters = new GenerationCounters("new", 0, 3,
218 gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
219 _gc_counters = new CollectorCounters(policy, 0);
220
// Per-space counters: eden plus the two survivor spaces ("s0"/"s1").
221 _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
222 _gen_counters);
223 _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
224 _gen_counters);
225 _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
226 _gen_counters);
227
// Lay out eden/from/to inside the committed region (clearing and mangling
// as requested) and publish the initial counter values.
228 compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
229 update_counters();
230 _old_gen = NULL;
231 _tenuring_threshold = MaxTenuringThreshold;
// Shift converts the byte-valued PretenureSizeThreshold flag into words.
232 _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
233
// Stop-the-world GC timer, C-heap allocated and tagged as GC memory.
234 _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
// NOTE(review): tail of a young-generation resize routine (presumably
// DefNewGeneration::compute_new_size -- the opening is not visible here).
416 // "changed" will be false. If the expansion failed
417 // (and at this point it was expected to succeed),
418 // ignore the failure (leaving "changed" as false).
419 }
420 if (desired_new_size < new_size_before && eden()->is_empty()) {
// Shrinking is attempted only when eden is empty; if eden still holds
// objects the is_empty() check above bails out of shrinking entirely.
421 // bail out of shrinking if objects in eden
422 size_t change = new_size_before - desired_new_size;
423 assert(change % alignment == 0, "just checking");
424 _virtual_space.shrink_by(change);
425 changed = true;
426 }
// After any grow or shrink, recompute the space layout and re-cover the
// new footprint with the barrier set.
427 if (changed) {
428 // The spaces have already been mangled at this point but
429 // may not have been cleared (set top = bottom) and should be.
430 // Mangling was done when the heap was being expanded.
431 compute_space_boundaries(eden()->used(),
432 SpaceDecorator::Clear,
433 SpaceDecorator::DontMangle);
434 MemRegion cmr((HeapWord*)_virtual_space.low(),
435 (HeapWord*)_virtual_space.high());
436 Universe::heap()->barrier_set()->resize_covered_region(cmr);
// Verbose-mode GC logging of before/after committed sizes (in KB).
437 if (Verbose && PrintGC) {
438 size_t new_size_after = _virtual_space.committed_size();
439 size_t eden_size_after = eden()->capacity();
440 size_t survivor_size_after = from()->capacity();
441 gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
442 SIZE_FORMAT "K [eden="
443 SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
444 new_size_before/K, new_size_after/K,
445 eden_size_after/K, survivor_size_after/K);
446 if (WizardMode) {
447 gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
448 thread_increase_size/K, threads_count);
449 }
450 gclog_or_tty->cr();
451 }
452 }
453 }
454
455 void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
456 assert(false, "NYI -- are you sure you want to call this?");
// NOTE(review): tail of a scavenge (presumably DefNewGeneration::collect --
// the opening of the function and the enclosing if are not visible here).
// This portion handles promotion-failure recovery and the GC epilogue.
674 _promo_failure_scan_stack.clear(true); // Clear cached segments.
675
// Undo the partial scavenge: presumably restores the headers of objects
// that had already been forwarded when promotion failed -- TODO confirm.
676 remove_forwarding_pointers();
677 if (PrintGCDetails) {
678 gclog_or_tty->print(" (promotion failed) ");
679 }
680 // Add to-space to the list of space to compact
681 // when a promotion failure has occurred. In that
682 // case there can be live objects in to-space
683 // as a result of a partial evacuation of eden
684 // and from-space.
685 swap_spaces(); // For uniformity wrt ParNewGeneration.
686 from()->set_next_compaction_space(to());
687 gch->set_incremental_collection_failed();
688
689 // Inform the next generation that a promotion failure occurred.
690 _old_gen->promotion_failure_occurred();
691 gc_tracer.report_promotion_failed(_promotion_failed_info);
692
693 // Reset the PromotionFailureALot counters.
694 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
695 }
696 if (PrintGC && !PrintGCDetails) {
697 gch->print_heap_change(gch_prev_used);
698 }
699 // set new iteration safe limit for the survivor spaces
700 from()->set_concurrent_iteration_safe_limit(from()->top());
701 to()->set_concurrent_iteration_safe_limit(to()->top());
702
703 // We need to use a monotonically non-decreasing time in ms
704 // or we will see time-warp warnings and os::javaTimeMillis()
705 // does not guarantee monotonicity.
706 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
707 update_time_of_last_gc(now);
708
// Epilogue: publish post-GC heap state and close out the GC timer/tracer.
709 gch->trace_heap_after_gc(&gc_tracer);
710 gc_tracer.report_tenuring_threshold(tenuring_threshold());
711
712 _gc_timer->register_gc_end();
713
714 gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
|
176
177 void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
178 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
179
180 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
181 KlassRemSet* klass_rem_set)
182 : _scavenge_closure(scavenge_closure),
183 _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
184
185
// DefNewGeneration constructor: sets up the young generation -- an eden
// space plus two survivor (from/to) spaces carved out of one reserved
// region -- along with its performance counters.  This version caches the
// GenCollectedHeap singleton in a local instead of re-fetching it.
// NOTE(review): this fragment is truncated before the closing brace; the
// remainder of the constructor is not visible here.
186 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
187 size_t initial_size,
188 int level,
189 const char* policy)
190 : Generation(rs, initial_size, level),
191 _promo_failure_drain_in_progress(false),
192 _should_allocate_from_space(false)
193 {
// Tell the barrier set which address range of this generation it must
// cover (the full reserved span of the virtual space).
194 MemRegion cmr((HeapWord*)_virtual_space.low(),
195 (HeapWord*)_virtual_space.high());
196 GenCollectedHeap* gch = GenCollectedHeap::heap();
197
198 gch->barrier_set()->resize_covered_region(cmr);
199
// The three sub-spaces; a failed allocation here is fatal at VM startup.
200 _eden_space = new ContiguousSpace();
201 _from_space = new ContiguousSpace();
202 _to_space = new ContiguousSpace();
203
204 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
205 vm_exit_during_initialization("Could not allocate a new gen space");
206
207 // Compute the maximum eden and survivor space sizes. These sizes
208 // are computed assuming the entire reserved space is committed.
209 // These values are exported as performance counters.
210 uintx alignment = gch->collector_policy()->space_alignment();
211 uintx size = _virtual_space.reserved_size();
212 _max_survivor_size = compute_survivor_size(size, alignment);
// Two survivors of _max_survivor_size each; eden gets whatever remains.
213 _max_eden_size = size - (2*_max_survivor_size);
214
215 // allocate the performance counters
216 GenCollectorPolicy* gcp = (GenCollectorPolicy*)gch->collector_policy();
217
218 // Generation counters -- generation 0, 3 subspaces
219 _gen_counters = new GenerationCounters("new", 0, 3,
220 gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);
221 _gc_counters = new CollectorCounters(policy, 0);
222
// Per-space counters: eden plus the two survivor spaces ("s0"/"s1").
223 _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
224 _gen_counters);
225 _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
226 _gen_counters);
227 _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
228 _gen_counters);
229
// Lay out eden/from/to inside the committed region (clearing and mangling
// as requested) and publish the initial counter values.
230 compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
231 update_counters();
232 _old_gen = NULL;
233 _tenuring_threshold = MaxTenuringThreshold;
// Shift converts the byte-valued PretenureSizeThreshold flag into words.
234 _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
235
// Stop-the-world GC timer, C-heap allocated and tagged as GC memory.
236 _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
// NOTE(review): tail of a young-generation resize routine (presumably
// DefNewGeneration::compute_new_size -- the opening is not visible here).
418 // "changed" will be false. If the expansion failed
419 // (and at this point it was expected to succeed),
420 // ignore the failure (leaving "changed" as false).
421 }
422 if (desired_new_size < new_size_before && eden()->is_empty()) {
// Shrinking is attempted only when eden is empty; if eden still holds
// objects the is_empty() check above bails out of shrinking entirely.
423 // bail out of shrinking if objects in eden
424 size_t change = new_size_before - desired_new_size;
425 assert(change % alignment == 0, "just checking");
426 _virtual_space.shrink_by(change);
427 changed = true;
428 }
// After any grow or shrink, recompute the space layout and re-cover the
// new footprint with the barrier set.
429 if (changed) {
430 // The spaces have already been mangled at this point but
431 // may not have been cleared (set top = bottom) and should be.
432 // Mangling was done when the heap was being expanded.
433 compute_space_boundaries(eden()->used(),
434 SpaceDecorator::Clear,
435 SpaceDecorator::DontMangle);
436 MemRegion cmr((HeapWord*)_virtual_space.low(),
437 (HeapWord*)_virtual_space.high());
438 gch->barrier_set()->resize_covered_region(cmr);
// Verbose-mode GC logging of before/after committed sizes (in KB).
439 if (Verbose && PrintGC) {
440 size_t new_size_after = _virtual_space.committed_size();
441 size_t eden_size_after = eden()->capacity();
442 size_t survivor_size_after = from()->capacity();
443 gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
444 SIZE_FORMAT "K [eden="
445 SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
446 new_size_before/K, new_size_after/K,
447 eden_size_after/K, survivor_size_after/K);
448 if (WizardMode) {
449 gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
450 thread_increase_size/K, threads_count);
451 }
452 gclog_or_tty->cr();
453 }
454 }
455 }
456
457 void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
458 assert(false, "NYI -- are you sure you want to call this?");
// NOTE(review): tail of a scavenge (presumably DefNewGeneration::collect --
// the opening of the function and the enclosing if are not visible here).
// This portion handles promotion-failure recovery and the GC epilogue.
676 _promo_failure_scan_stack.clear(true); // Clear cached segments.
677
// Undo the partial scavenge: presumably restores the headers of objects
// that had already been forwarded when promotion failed -- TODO confirm.
678 remove_forwarding_pointers();
679 if (PrintGCDetails) {
680 gclog_or_tty->print(" (promotion failed) ");
681 }
682 // Add to-space to the list of space to compact
683 // when a promotion failure has occurred. In that
684 // case there can be live objects in to-space
685 // as a result of a partial evacuation of eden
686 // and from-space.
687 swap_spaces(); // For uniformity wrt ParNewGeneration.
688 from()->set_next_compaction_space(to());
689 gch->set_incremental_collection_failed();
690
691 // Inform the next generation that a promotion failure occurred.
692 _old_gen->promotion_failure_occurred();
693 gc_tracer.report_promotion_failed(_promotion_failed_info);
694
695 // Reset the PromotionFailureALot counters.
696 NOT_PRODUCT(gch->reset_promotion_should_fail();)
697 }
698 if (PrintGC && !PrintGCDetails) {
699 gch->print_heap_change(gch_prev_used);
700 }
701 // set new iteration safe limit for the survivor spaces
702 from()->set_concurrent_iteration_safe_limit(from()->top());
703 to()->set_concurrent_iteration_safe_limit(to()->top());
704
705 // We need to use a monotonically non-decreasing time in ms
706 // or we will see time-warp warnings and os::javaTimeMillis()
707 // does not guarantee monotonicity.
708 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
709 update_time_of_last_gc(now);
710
// Epilogue: publish post-GC heap state and close out the GC timer/tracer.
711 gch->trace_heap_after_gc(&gc_tracer);
712 gc_tracer.report_tenuring_threshold(tenuring_threshold());
713
714 _gc_timer->register_gc_end();
715
716 gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
|