src/share/vm/memory/defNewGeneration.cpp
rev 7211 : [mq]: remove_ngen
rev 7212 : [mq]: remove_get_gen
rev 7213 : imported patch move_genspecs
rev 7215 : imported patch remove_levels

*** 54,66 ****
  //
  // DefNewGeneration functions.

  // Methods of protected closure types.

! DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
!   assert(g->level() == 0, "Optimized for youngest gen.");
! }
  bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
    return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
  }

  DefNewGeneration::KeepAliveClosure::
--- 54,64 ----
  //
  // DefNewGeneration functions.

  // Methods of protected closure types.

! DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) { }
  bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
    return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
  }

  DefNewGeneration::KeepAliveClosure::
*** 81,133 ****
  void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
  void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

  DefNewGeneration::EvacuateFollowersClosure::
! EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                           ScanClosure* cur, ScanClosure* older) :
!   _gch(gch), _level(level),
!   _scan_cur_or_nonheap(cur), _scan_older(older) {}

  void DefNewGeneration::EvacuateFollowersClosure::do_void() {
    do {
!     _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, _scan_older);
!   } while (!_gch->no_allocs_since_save_marks(_level));
  }

  DefNewGeneration::FastEvacuateFollowersClosure::
! FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                               DefNewGeneration* gen,
                               FastScanClosure* cur, FastScanClosure* older) :
!   _gch(gch), _level(level), _gen(gen),
!   _scan_cur_or_nonheap(cur), _scan_older(older) {}

  void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
    do {
!     _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, _scan_older);
!   } while (!_gch->no_allocs_since_save_marks(_level));
    guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
  }

  ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
      OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
  {
-   assert(_g->level() == 0, "Optimized for youngest generation");
    _boundary = _g->reserved().end();
  }

  void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
  void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

  FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
      OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
  {
-   assert(_g->level() == 0, "Optimized for youngest generation");
    _boundary = _g->reserved().end();
  }

  void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
  void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
--- 79,127 ----
  void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
  void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

  DefNewGeneration::EvacuateFollowersClosure::
! EvacuateFollowersClosure(GenCollectedHeap* gch,
                           ScanClosure* cur, ScanClosure* older) :
!   _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older) {}

  void DefNewGeneration::EvacuateFollowersClosure::do_void() {
    do {
!     _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap, _scan_older);
!   } while (!_gch->no_allocs_since_save_marks(Generation::Young));
  }

  DefNewGeneration::FastEvacuateFollowersClosure::
! FastEvacuateFollowersClosure(GenCollectedHeap* gch,
                               DefNewGeneration* gen,
                               FastScanClosure* cur, FastScanClosure* older) :
!   _gch(gch), _gen(gen), _scan_cur_or_nonheap(cur), _scan_older(older) {}

  void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
    do {
!     _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap, _scan_older);
!   } while (!_gch->no_allocs_since_save_marks(Generation::Young));
    guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
  }

  ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
      OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
  {
    _boundary = _g->reserved().end();
  }

  void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
  void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

  FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
      OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
  {
    _boundary = _g->reserved().end();
  }

  void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
  void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
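Note on the hunk above: the save-marks loop now names its starting generation with the Generation::Young enum value from this patch series instead of an integer level. The following standalone sketch models how such an enum-keyed walk can work over a fixed two-generation heap; every type here is a simplified stand-in, not the real HotSpot declaration.

  // Illustrative model only: enum-keyed "since save marks" iteration.
  struct Closure {
    virtual void apply_to_gen() = 0;   // stands in for scanning one generation
    virtual ~Closure() {}
  };

  struct Gen {
    bool _allocs_since_save_marks;
    Gen() : _allocs_since_save_marks(true) {}
    void since_save_marks_iterate(Closure* cl) {
      cl->apply_to_gen();               // scan objects allocated since save marks
      _allocs_since_save_marks = false; // marks are now caught up
    }
    bool no_allocs_since_save_marks() const { return !_allocs_since_save_marks; }
  };

  struct Generation { enum Type { Young, Old }; };

  struct TwoGenHeap {
    Gen _young, _old;
    // Apply 'cur' to the starting generation and 'older' to the one above it.
    void oop_since_save_marks_iterate(Generation::Type start,
                                      Closure* cur, Closure* older) {
      if (start == Generation::Young) {
        _young.since_save_marks_iterate(cur);
        _old.since_save_marks_iterate(older);
      } else {
        _old.since_save_marks_iterate(cur);
      }
    }
    bool no_allocs_since_save_marks(Generation::Type start) {
      if (start == Generation::Young && !_young.no_allocs_since_save_marks()) {
        return false;
      }
      return _old.no_allocs_since_save_marks();
    }
  };

With only two generations the enum makes the caller's intent explicit, where an int level invited out-of-range values and the now-deleted level() == 0 asserts.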
*** 164,174 ****
  }

  ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
    _g(g)
  {
-   assert(_g->level() == 0, "Optimized for youngest generation");
    _boundary = _g->reserved().end();
  }

  void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
  void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
--- 158,167 ----
*** 182,194 ****
    _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}

  DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                     size_t initial_size,
-                                    int level,
                                     const char* policy)
!   : Generation(rs, initial_size, level),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
  {
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
--- 175,186 ----
    _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}

  DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                     size_t initial_size,
                                     const char* policy)
!   : Generation(rs, initial_size),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
  {
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
--- 175,186 ----
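The hunk above stops threading the level index through the constructor chain. A minimal compilable sketch of the before/after shape; names are abbreviated stand-ins and ReservedSpace plus the other real parameters are omitted.

  #include <cstddef>

  // Stand-in base class; the real Generation also takes a ReservedSpace.
  class Generation {
   protected:
    size_t _initial_size;
    // Before this series: Generation(ReservedSpace rs, size_t initial_size, int level)
    explicit Generation(size_t initial_size) : _initial_size(initial_size) {}
  };

  class DefNewGeneration : public Generation {
    bool _promo_failure_drain_in_progress;
    bool _should_allocate_from_space;
   public:
    DefNewGeneration(size_t initial_size, const char* policy)
      : Generation(initial_size),   // no level argument to pass along
        _promo_failure_drain_in_progress(false),
        _should_allocate_from_space(false) { (void)policy; }
  };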
*** 380,396 ****
    // If not we bail out (otherwise we would have to relocate the objects)
    if (!from()->is_empty() || !to()->is_empty()) {
      return;
    }

-   int next_level = level() + 1;
    GenCollectedHeap* gch = GenCollectedHeap::heap();
-   assert(next_level < gch->n_gens(),
-          "DefNewGeneration cannot be an oldest gen");

!   Generation* old_gen = gch->old_gen();
!   size_t old_size = old_gen->capacity();
    size_t new_size_before = _virtual_space.committed_size();
    size_t min_new_size = spec()->init_size();
    size_t max_new_size = reserved().byte_size();

    assert(min_new_size <= new_size_before && new_size_before <= max_new_size,
--- 372,384 ----
    // If not we bail out (otherwise we would have to relocate the objects)
    if (!from()->is_empty() || !to()->is_empty()) {
      return;
    }

    GenCollectedHeap* gch = GenCollectedHeap::heap();

!   size_t old_size = gch->old_gen()->capacity();
    size_t new_size_before = _virtual_space.committed_size();
    size_t min_new_size = spec()->init_size();
    size_t max_new_size = reserved().byte_size();

    assert(min_new_size <= new_size_before && new_size_before <= max_new_size,
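With exactly two generations, the old sibling no longer has to be located by index (level() + 1 checked against n_gens()); the heap hands it out directly. A self-contained sketch of that accessor pattern, using simplified stand-in types rather than the real HotSpot classes:

  #include <cstddef>

  // Stand-ins only; not the real HotSpot types.
  struct Generation {
    size_t _capacity;
    size_t capacity() const { return _capacity; }
  };

  struct GenCollectedHeap {
    Generation _young, _old;
    Generation* young_gen() { return &_young; }
    Generation* old_gen()   { return &_old; }
  };

  // Before: int next_level = level() + 1;  // then an indexed lookup,
  //         guarded by an assert against n_gens().
  // After:  ask the heap for the old generation directly.
  size_t old_gen_capacity(GenCollectedHeap* gch) {
    return gch->old_gen()->capacity();
  }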
*** 603,613 ****
    age_table()->clear();
    to()->clear(SpaceDecorator::Mangle);

    gch->rem_set()->prepare_for_younger_refs_iterate(false);

!   assert(gch->no_allocs_since_save_marks(0),
           "save marks have not been newly set.");

    // Not very pretty.
    CollectorPolicy* cp = gch->collector_policy();
--- 591,601 ----
    age_table()->clear();
    to()->clear(SpaceDecorator::Mangle);

    gch->rem_set()->prepare_for_younger_refs_iterate(false);

!   assert(gch->no_allocs_since_save_marks(Generation::Young),
           "save marks have not been newly set.");

    // Not very pretty.
    CollectorPolicy* cp = gch->collector_policy();
*** 619,636 ****
    CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                             &fsc_with_no_gc_barrier,
                                             false);

    set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
!   FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                    &fsc_with_no_gc_barrier,
                                                    &fsc_with_gc_barrier);

!   assert(gch->no_allocs_since_save_marks(0),
           "save marks have not been newly set.");

!   gch->gen_process_roots(_level,
                           true,  // Process younger gens, if any,
                                  // as strong roots.
                           true,  // activate StrongRootsScope
                           SharedHeap::SO_ScavengeCodeCache,
                           GenCollectedHeap::StrongAndWeakRoots,
--- 607,624 ----
    CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                             &fsc_with_no_gc_barrier,
                                             false);

    set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
!   FastEvacuateFollowersClosure evacuate_followers(gch, this,
                                                    &fsc_with_no_gc_barrier,
                                                    &fsc_with_gc_barrier);

!   assert(gch->no_allocs_since_save_marks(Generation::Young),
           "save marks have not been newly set.");

!   gch->gen_process_roots(Generation::Young,
                           true,  // Process younger gens, if any,
                                  // as strong roots.
                           true,  // activate StrongRootsScope
                           SharedHeap::SO_ScavengeCodeCache,
                           GenCollectedHeap::StrongAndWeakRoots,
*** 865,875 ****
  #undef DefNew_SINCE_SAVE_MARKS_DEFN

  void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                            size_t max_alloc_words) {
    if (requestor == this || _promotion_failed) return;
!   assert(requestor->level() > level(), "DefNewGeneration must be youngest");

    /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
    if (to_space->top() > to_space->bottom()) {
      trace("to_space not empty when contribute_scratch called");
    }
--- 853,863 ----
  #undef DefNew_SINCE_SAVE_MARKS_DEFN

  void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                            size_t max_alloc_words) {
    if (requestor == this || _promotion_failed) return;
!   assert(requestor == GenCollectedHeap::heap()->old_gen(), "We should not call our own generation");

    /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
    if (to_space->top() > to_space->bottom()) {
      trace("to_space not empty when contribute_scratch called");
    }
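In the hunk above, the ordering assert (requestor->level() > level()) becomes an exact identity check: once the heap has precisely two generations, the only foreign generation that may ask the young generation for scratch space is the old one. A sketch of that precondition, with simplified stand-in types and a hypothetical singleton accessor:

  #include <cassert>

  struct Generation {};

  struct GenCollectedHeap {
    Generation _young, _old;
    Generation* old_gen() { return &_old; }
    static GenCollectedHeap* heap() {   // hypothetical singleton for this sketch
      static GenCollectedHeap the_heap;
      return &the_heap;
    }
  };

  // contribute_scratch precondition after the patch: a requestor other than
  // ourselves must be the old generation, because no other generation exists.
  void check_scratch_requestor(Generation* requestor) {
    assert(requestor == GenCollectedHeap::heap()->old_gen(),
           "We should not call our own generation");
  }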