src/share/vm/memory/defNewGeneration.cpp
rev 7211 : [mq]: remove_ngen
rev 7212 : [mq]: remove_get_gen
rev 7213 : imported patch move_genspecs
rev 7215 : imported patch remove_levels
rev 7216 : imported patch cleanup

*** 54,66 ****
  //
  // DefNewGeneration functions.

  // Methods of protected closure types.

! DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
!   assert(g->level() == 0, "Optimized for youngest gen.");
! }
  bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
    return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
  }

  DefNewGeneration::KeepAliveClosure::
--- 54,64 ----
  //
  // DefNewGeneration functions.

  // Methods of protected closure types.

! DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) { }
  bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
    return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
  }

  DefNewGeneration::KeepAliveClosure::
*** 81,133 ****
  void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
  void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

  DefNewGeneration::EvacuateFollowersClosure::
! EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                           ScanClosure* cur, ScanClosure* older) :
!   _gch(gch), _level(level),
!   _scan_cur_or_nonheap(cur), _scan_older(older)
  {}

  void DefNewGeneration::EvacuateFollowersClosure::do_void() {
    do {
!     _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, _scan_older);
!   } while (!_gch->no_allocs_since_save_marks(_level));
  }

  DefNewGeneration::FastEvacuateFollowersClosure::
! FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                               DefNewGeneration* gen,
                               FastScanClosure* cur, FastScanClosure* older) :
!   _gch(gch), _level(level), _gen(gen),
!   _scan_cur_or_nonheap(cur), _scan_older(older)
  {}

  void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
    do {
!     _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, _scan_older);
!   } while (!_gch->no_allocs_since_save_marks(_level));
    guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
  }

  ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
      OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
  {
-   assert(_g->level() == 0, "Optimized for youngest generation");
    _boundary = _g->reserved().end();
  }

  void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
  void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

  FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
      OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
  {
-   assert(_g->level() == 0, "Optimized for youngest generation");
    _boundary = _g->reserved().end();
  }

  void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
  void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
--- 79,127 ----
  void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
  void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

  DefNewGeneration::EvacuateFollowersClosure::
! EvacuateFollowersClosure(GenCollectedHeap* gch,
                           ScanClosure* cur, ScanClosure* older) :
!   _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
  {}

  void DefNewGeneration::EvacuateFollowersClosure::do_void() {
    do {
!     _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap, _scan_older);
!   } while (!_gch->no_allocs_since_save_marks(Generation::Young));
  }

  DefNewGeneration::FastEvacuateFollowersClosure::
! FastEvacuateFollowersClosure(GenCollectedHeap* gch,
                               DefNewGeneration* gen,
                               FastScanClosure* cur, FastScanClosure* older) :
!   _gch(gch), _gen(gen), _scan_cur_or_nonheap(cur), _scan_older(older)
  {}

  void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
    do {
!     _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap, _scan_older);
!   } while (!_gch->no_allocs_since_save_marks(Generation::Young));
    guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
  }

  ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
      OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
  {
    _boundary = _g->reserved().end();
  }

  void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
  void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

  FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
      OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
  {
    _boundary = _g->reserved().end();
  }

  void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
  void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
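
Reviewer note on the new right-hand side: the integer generation level is gone from these closures; call sites now pass a named generation identifier and ask the heap for the old generation directly. The following is only a minimal sketch of the API shape those calls assume. The names Generation::Young and old_gen() are taken from the new hunks in this diff; the enum name Type, the value Old, the accessor young_gen(), and the class name GenCollectedHeapSketch are assumptions for illustration, not code from the patched headers.

// Sketch only, inferred from the call sites above -- not part of this webrev.
class Generation {
 public:
  enum Type {          // assumed enum name; replaces the old integer 'level'
    Young,             // used as Generation::Young in the new hunks
    Old                // assumed counterpart
  };
};

class GenCollectedHeapSketch {
 public:
  Generation* young_gen() const { return _young_gen; }  // assumed accessor
  Generation* old_gen()   const { return _old_gen; }    // matches gch->old_gen() in the new hunks
 private:
  Generation* _young_gen;
  Generation* _old_gen;
};
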
*** 164,174 ****
  }

  ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
    _g(g)
  {
-   assert(_g->level() == 0, "Optimized for youngest generation");
    _boundary = _g->reserved().end();
  }

  void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
  void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
--- 158,167 ----
*** 182,194 ****
    _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}


  DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                     size_t initial_size,
-                                    int level,
                                     const char* policy)
! : Generation(rs, initial_size, level),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
  {
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
--- 175,186 ----
    _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}


  DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                     size_t initial_size,
                                     const char* policy)
! : Generation(rs, initial_size),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
  {
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
*** 228,238 ****
    _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                      _gen_counters);

    compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    update_counters();
!   _next_gen = NULL;
    _tenuring_threshold = MaxTenuringThreshold;
    _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

    _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
  }
--- 220,230 ----
    _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                      _gen_counters);

    compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    update_counters();
!   _old_gen = NULL;
    _tenuring_threshold = MaxTenuringThreshold;
    _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

    _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
  }
*** 380,396 ****
    // If not we bail out (otherwise we would have to relocate the objects)
    if (!from()->is_empty() || !to()->is_empty()) {
      return;
    }

-   int next_level = level() + 1;
    GenCollectedHeap* gch = GenCollectedHeap::heap();
-   assert(next_level < gch->_n_gens,
-          "DefNewGeneration cannot be an oldest gen");

!   Generation* next_gen = gch->_gens[next_level];
!   size_t old_size = next_gen->capacity();
    size_t new_size_before = _virtual_space.committed_size();
    size_t min_new_size = spec()->init_size();
    size_t max_new_size = reserved().byte_size();
    assert(min_new_size <= new_size_before &&
           new_size_before <= max_new_size,
--- 372,384 ----
    // If not we bail out (otherwise we would have to relocate the objects)
    if (!from()->is_empty() || !to()->is_empty()) {
      return;
    }

    GenCollectedHeap* gch = GenCollectedHeap::heap();

!   size_t old_size = gch->old_gen()->capacity();
    size_t new_size_before = _virtual_space.committed_size();
    size_t min_new_size = spec()->init_size();
    size_t max_new_size = reserved().byte_size();
    assert(min_new_size <= new_size_before &&
           new_size_before <= max_new_size,
*** 570,580 ****
    _gc_timer->register_gc_start();

    DefNewTracer gc_tracer;
    gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

!   _next_gen = gch->next_gen(this);

    // If the next generation is too full to accommodate promotion
    // from this generation, pass on collection; let the next generation
    // do it.
    if (!collection_attempt_is_safe()) {
--- 558,568 ----
    _gc_timer->register_gc_start();

    DefNewTracer gc_tracer;
    gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

!   _old_gen = gch->old_gen();

    // If the next generation is too full to accommodate promotion
    // from this generation, pass on collection; let the next generation
    // do it.
    if (!collection_attempt_is_safe()) {
*** 603,613 ****
    age_table()->clear();
    to()->clear(SpaceDecorator::Mangle);

    gch->rem_set()->prepare_for_younger_refs_iterate(false);

!   assert(gch->no_allocs_since_save_marks(0),
           "save marks have not been newly set.");

    // Not very pretty.
    CollectorPolicy* cp = gch->collector_policy();
--- 591,601 ----
    age_table()->clear();
    to()->clear(SpaceDecorator::Mangle);

    gch->rem_set()->prepare_for_younger_refs_iterate(false);

!   assert(gch->no_allocs_since_save_marks(Generation::Young),
           "save marks have not been newly set.");

    // Not very pretty.
    CollectorPolicy* cp = gch->collector_policy();
*** 619,636 ****
    CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                             &fsc_with_no_gc_barrier,
                                             false);

    set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
!   FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                    &fsc_with_no_gc_barrier,
                                                    &fsc_with_gc_barrier);

!   assert(gch->no_allocs_since_save_marks(0),
           "save marks have not been newly set.");

!   gch->gen_process_roots(_level,
                           true,  // Process younger gens, if any,
                                  // as strong roots.
                           true,  // activate StrongRootsScope
                           SharedHeap::SO_ScavengeCodeCache,
                           GenCollectedHeap::StrongAndWeakRoots,
--- 607,624 ----
    CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                             &fsc_with_no_gc_barrier,
                                             false);

    set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
!   FastEvacuateFollowersClosure evacuate_followers(gch, this,
                                                    &fsc_with_no_gc_barrier,
                                                    &fsc_with_gc_barrier);

!   assert(gch->no_allocs_since_save_marks(Generation::Young),
           "save marks have not been newly set.");

!   gch->gen_process_roots(Generation::Young,
                           true,  // Process younger gens, if any,
                                  // as strong roots.
                           true,  // activate StrongRootsScope
                           SharedHeap::SO_ScavengeCodeCache,
                           GenCollectedHeap::StrongAndWeakRoots,
*** 690,700 ****
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
!   _next_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
--- 678,688 ----
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
!   _old_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
*** 795,805 ****
      obj = (oop) to()->allocate_aligned(s);
    }

    // Otherwise try allocating obj tenured
    if (obj == NULL) {
!     obj = _next_gen->promote(old, s);
      if (obj == NULL) {
        handle_promotion_failure(old);
        return old;
      }
    } else {
--- 783,793 ----
      obj = (oop) to()->allocate_aligned(s);
    }

    // Otherwise try allocating obj tenured
    if (obj == NULL) {
!     obj = _old_gen->promote(old, s);
      if (obj == NULL) {
        handle_promotion_failure(old);
        return old;
      }
    } else {
*** 864,875 ****
  #undef DefNew_SINCE_SAVE_MARKS_DEFN

  void DefNewGeneration::contribute_scratch(ScratchBlock*& list,
                                            Generation* requestor,
                                            size_t max_alloc_words) {
!   if (requestor == this || _promotion_failed) return;
!   assert(requestor->level() > level(), "DefNewGeneration must be youngest");

    /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
    if (to_space->top() > to_space->bottom()) {
      trace("to_space not empty when contribute_scratch called");
    }
--- 852,865 ----
  #undef DefNew_SINCE_SAVE_MARKS_DEFN

  void DefNewGeneration::contribute_scratch(ScratchBlock*& list,
                                            Generation* requestor,
                                            size_t max_alloc_words) {
!   if (requestor == this || _promotion_failed) {
!     return;
!   }
!   assert(requestor == GenCollectedHeap::heap()->old_gen(), "We should not call our own generation");

    /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
    if (to_space->top() > to_space->bottom()) {
      trace("to_space not empty when contribute_scratch called");
    }
*** 900,914 ****
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print(" :: to is not empty :: ");
      }
      return false;
    }

!   if (_next_gen == NULL) {
      GenCollectedHeap* gch = GenCollectedHeap::heap();
!     _next_gen = gch->next_gen(this);
    }
!   return _next_gen->promotion_attempt_is_safe(used());
  }

  void DefNewGeneration::gc_epilogue(bool full) {
    DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
--- 890,904 ----
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print(" :: to is not empty :: ");
      }
      return false;
    }

!   if (_old_gen == NULL) {
      GenCollectedHeap* gch = GenCollectedHeap::heap();
!     _old_gen = gch->old_gen();
    }
!   return _old_gen->promotion_attempt_is_safe(used());
  }

  void DefNewGeneration::gc_epilogue(bool full) {
    DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
*** 1024,1053 ****
  // Moved from inline file as they are not called inline
  CompactibleSpace* DefNewGeneration::first_compaction_space() const {
    return eden();
  }

! HeapWord* DefNewGeneration::allocate(size_t word_size,
!                                      bool is_tlab) {
    // This is the slow-path allocation for the DefNewGeneration.
    // Most allocations are fast-path in compiled code.
    // We try to allocate from the eden.  If that works, we are happy.
    // Note that since DefNewGeneration supports lock-free allocation, we
    // have to use it here, as well.
    HeapWord* result = eden()->par_allocate(word_size);
    if (result != NULL) {
!     if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
!       _next_gen->sample_eden_chunk();
      }
      return result;
    }
    do {
      HeapWord* old_limit = eden()->soft_end();
      if (old_limit < eden()->end()) {
!       // Tell the next generation we reached a limit.
        HeapWord* new_limit =
!         next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
        if (new_limit != NULL) {
          Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
        } else {
          assert(eden()->soft_end() == eden()->end(),
                 "invalid state after allocation_limit_reached returned null");
--- 1014,1042 ----
  // Moved from inline file as they are not called inline
  CompactibleSpace* DefNewGeneration::first_compaction_space() const {
    return eden();
  }

! HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
    // This is the slow-path allocation for the DefNewGeneration.
    // Most allocations are fast-path in compiled code.
    // We try to allocate from the eden.  If that works, we are happy.
    // Note that since DefNewGeneration supports lock-free allocation, we
    // have to use it here, as well.
    HeapWord* result = eden()->par_allocate(word_size);
    if (result != NULL) {
!     if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
!       _old_gen->sample_eden_chunk();
      }
      return result;
    }
    do {
      HeapWord* old_limit = eden()->soft_end();
      if (old_limit < eden()->end()) {
!       // Tell the old generation we reached a limit.
        HeapWord* new_limit =
!         _old_gen->allocation_limit_reached(eden(), eden()->top(), word_size);
        if (new_limit != NULL) {
          Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
        } else {
          assert(eden()->soft_end() == eden()->end(),
                 "invalid state after allocation_limit_reached returned null");
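
Reviewer note: the comment in this hunk points out that eden supports lock-free allocation, which is why the slow path still goes through par_allocate rather than taking a lock. For readers unfamiliar with that pattern, below is a small self-contained sketch of a CAS-based bump-pointer allocation in the same spirit; the names (EdenSketch, top, end) are illustrative only and this is not HotSpot's ContiguousSpace::par_allocate.

#include <atomic>
#include <cstddef>

// Illustrative CAS bump-pointer allocator (not HotSpot code). Several threads
// may call par_allocate() concurrently; each claims its block by advancing
// 'top' with a compare-and-swap, which is what makes the eden fast path
// lock-free.
struct EdenSketch {
  std::atomic<char*> top;   // next free byte
  char*              end;   // exclusive limit of the space

  void* par_allocate(std::size_t byte_size) {
    char* old_top = top.load(std::memory_order_relaxed);
    for (;;) {
      char* new_top = old_top + byte_size;
      if (new_top > end) {
        return nullptr;     // space exhausted: caller falls back to the slow path
      }
      // On success we own [old_top, new_top); on failure old_top is refreshed
      // with the current value of 'top' and we simply retry.
      if (top.compare_exchange_weak(old_top, new_top)) {
        return old_top;
      }
    }
  }
};
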
*** 1066,1086 ****
    // out of heap space, and we try to allocate the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    if (result == NULL) {
      result = allocate_from_space(word_size);
!   } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
!     _next_gen->sample_eden_chunk();
    }
    return result;
  }

  HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                           bool is_tlab) {
    HeapWord* res = eden()->par_allocate(word_size);
!   if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
!     _next_gen->sample_eden_chunk();
    }
    return res;
  }

  void DefNewGeneration::gc_prologue(bool full) {
--- 1055,1075 ----
    // out of heap space, and we try to allocate the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    if (result == NULL) {
      result = allocate_from_space(word_size);
!   } else if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
!     _old_gen->sample_eden_chunk();
    }
    return result;
  }

  HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                           bool is_tlab) {
    HeapWord* res = eden()->par_allocate(word_size);
!   if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
!     _old_gen->sample_eden_chunk();
    }
    return res;
  }

  void DefNewGeneration::gc_prologue(bool full) {