src/share/vm/memory/defNewGeneration.cpp

rev 7211 : [mq]: remove_ngen
rev 7212 : [mq]: remove_get_gen
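
The two [mq] revisions above drop DefNewGeneration's level-indexed lookup of its successor generation: the cached _next_gen field and the gch->next_gen(this) / gch->get_gen(level() + 1) calls are replaced by an _old_gen field filled from GenCollectedHeap::old_gen(). A minimal sketch of the before/after access pattern, illustrative only and not part of the patch itself:

    GenCollectedHeap* gch = GenCollectedHeap::heap();

    // Before: index into the generation array by level to find the
    // generation that promotions from this one go to.
    Generation* next_gen = gch->get_gen(level() + 1);

    // After: the heap is treated as exactly young + old, so the old
    // generation is asked for directly.
    Generation* old_gen = gch->old_gen();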

*** 228,238 ****
      _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size,
                                        _to_space, _gen_counters);
  
    compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    update_counters();
!   _next_gen = NULL;
    _tenuring_threshold = MaxTenuringThreshold;
    _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
  
    _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
  }
--- 228,238 ----
      _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size,
                                        _to_space, _gen_counters);
  
    compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    update_counters();
!   _old_gen = NULL;
    _tenuring_threshold = MaxTenuringThreshold;
    _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
  
    _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
  }
*** 385,396 ****
    int next_level = level() + 1;
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    assert(next_level < gch->_n_gens,
           "DefNewGeneration cannot be an oldest gen");
  
!   Generation* next_gen = gch->get_gen(next_level);
!   size_t old_size = next_gen->capacity();
    size_t new_size_before = _virtual_space.committed_size();
    size_t min_new_size = spec()->init_size();
    size_t max_new_size = reserved().byte_size();
    assert(min_new_size <= new_size_before &&
           new_size_before <= max_new_size,
--- 385,396 ----
    int next_level = level() + 1;
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    assert(next_level < gch->_n_gens,
           "DefNewGeneration cannot be an oldest gen");
  
!   Generation* old_gen = gch->old_gen();
!   size_t old_size = old_gen->capacity();
    size_t new_size_before = _virtual_space.committed_size();
    size_t min_new_size = spec()->init_size();
    size_t max_new_size = reserved().byte_size();
    assert(min_new_size <= new_size_before &&
           new_size_before <= max_new_size,
*** 570,580 ****
    _gc_timer->register_gc_start();
  
    DefNewTracer gc_tracer;
    gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  
!   _next_gen = gch->next_gen(this);
  
    // If the next generation is too full to accommodate promotion
    // from this generation, pass on collection; let the next generation
    // do it.
    if (!collection_attempt_is_safe()) {
--- 570,580 ----
    _gc_timer->register_gc_start();
  
    DefNewTracer gc_tracer;
    gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  
!   _old_gen = gch->old_gen();
  
    // If the next generation is too full to accommodate promotion
    // from this generation, pass on collection; let the next generation
    // do it.
    if (!collection_attempt_is_safe()) {
*** 690,700 ****
      swap_spaces();   // For uniformity wrt ParNewGeneration.
      from()->set_next_compaction_space(to());
      gch->set_incremental_collection_failed();
  
      // Inform the next generation that a promotion failure occurred.
!     _next_gen->promotion_failure_occurred();
      gc_tracer.report_promotion_failed(_promotion_failed_info);
  
      // Reset the PromotionFailureALot counters.
      NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
    }
--- 690,700 ----
      swap_spaces();   // For uniformity wrt ParNewGeneration.
      from()->set_next_compaction_space(to());
      gch->set_incremental_collection_failed();
  
      // Inform the next generation that a promotion failure occurred.
!     _old_gen->promotion_failure_occurred();
      gc_tracer.report_promotion_failed(_promotion_failed_info);
  
      // Reset the PromotionFailureALot counters.
      NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
    }
*** 795,805 ****
        obj = (oop) to()->allocate_aligned(s);
      }
  
      // Otherwise try allocating obj tenured
      if (obj == NULL) {
!       obj = _next_gen->promote(old, s);
        if (obj == NULL) {
          handle_promotion_failure(old);
          return old;
        }
      } else {
--- 795,805 ----
        obj = (oop) to()->allocate_aligned(s);
      }
  
      // Otherwise try allocating obj tenured
      if (obj == NULL) {
!       obj = _old_gen->promote(old, s);
        if (obj == NULL) {
          handle_promotion_failure(old);
          return old;
        }
      } else {
*** 900,914 ****
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print(" :: to is not empty :: ");
      }
      return false;
    }
  
!   if (_next_gen == NULL) {
      GenCollectedHeap* gch = GenCollectedHeap::heap();
!     _next_gen = gch->next_gen(this);
    }
!   return _next_gen->promotion_attempt_is_safe(used());
  }
  
  void DefNewGeneration::gc_epilogue(bool full) {
    DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
--- 900,914 ----
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print(" :: to is not empty :: ");
      }
      return false;
    }
  
!   if (_old_gen == NULL) {
      GenCollectedHeap* gch = GenCollectedHeap::heap();
!     _old_gen = gch->old_gen();
    }
!   return _old_gen->promotion_attempt_is_safe(used());
  }
  
  void DefNewGeneration::gc_epilogue(bool full) {
    DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
*** 1024,1053 ****
  // Moved from inline file as they are not called inline
  CompactibleSpace* DefNewGeneration::first_compaction_space() const {
    return eden();
  }
  
! HeapWord* DefNewGeneration::allocate(size_t word_size,
!                                      bool is_tlab) {
    // This is the slow-path allocation for the DefNewGeneration.
    // Most allocations are fast-path in compiled code.
    // We try to allocate from the eden. If that works, we are happy.
    // Note that since DefNewGeneration supports lock-free allocation, we
    // have to use it here, as well.
    HeapWord* result = eden()->par_allocate(word_size);
    if (result != NULL) {
!     if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
!       _next_gen->sample_eden_chunk();
      }
      return result;
    }
    do {
      HeapWord* old_limit = eden()->soft_end();
      if (old_limit < eden()->end()) {
!       // Tell the next generation we reached a limit.
        HeapWord* new_limit =
!         next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
        if (new_limit != NULL) {
          Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
        } else {
          assert(eden()->soft_end() == eden()->end(),
                 "invalid state after allocation_limit_reached returned null");
--- 1024,1052 ----
  // Moved from inline file as they are not called inline
  CompactibleSpace* DefNewGeneration::first_compaction_space() const {
    return eden();
  }
  
! HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
    // This is the slow-path allocation for the DefNewGeneration.
    // Most allocations are fast-path in compiled code.
    // We try to allocate from the eden. If that works, we are happy.
    // Note that since DefNewGeneration supports lock-free allocation, we
    // have to use it here, as well.
    HeapWord* result = eden()->par_allocate(word_size);
    if (result != NULL) {
!     if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
!       _old_gen->sample_eden_chunk();
      }
      return result;
    }
    do {
      HeapWord* old_limit = eden()->soft_end();
      if (old_limit < eden()->end()) {
!       // Tell the old generation we reached a limit.
        HeapWord* new_limit =
!         _old_gen->allocation_limit_reached(eden(), eden()->top(), word_size);
        if (new_limit != NULL) {
          Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
        } else {
          assert(eden()->soft_end() == eden()->end(),
                 "invalid state after allocation_limit_reached returned null");
*** 1066,1086 ****
    // out of heap space, and we try to allocate the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    if (result == NULL) {
      result = allocate_from_space(word_size);
!   } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
!     _next_gen->sample_eden_chunk();
    }
    return result;
  }
  
  HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                           bool is_tlab) {
    HeapWord* res = eden()->par_allocate(word_size);
!   if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
!     _next_gen->sample_eden_chunk();
    }
    return res;
  }
  
  void DefNewGeneration::gc_prologue(bool full) {
--- 1065,1085 ----
    // out of heap space, and we try to allocate the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    if (result == NULL) {
      result = allocate_from_space(word_size);
!   } else if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
!     _old_gen->sample_eden_chunk();
    }
    return result;
  }
  
  HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                           bool is_tlab) {
    HeapWord* res = eden()->par_allocate(word_size);
!   if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
!     _old_gen->sample_eden_chunk();
    }
    return res;
  }
  
  void DefNewGeneration::gc_prologue(bool full) {
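
For readers skimming the allocate() hunk above: eden's soft_end is moved with Atomic::cmpxchg_ptr so that concurrent allocators never lose an update. A self-contained sketch of that compare-and-swap pattern, using std::atomic as a stand-in for HotSpot's internal Atomic class (the soft_end name is borrowed from the hunk; everything else here is illustrative):

    #include <atomic>

    // Stand-in for eden()->soft_end_addr() in the diff above.
    static std::atomic<char*> soft_end;

    // Publish a new allocation limit only if no other thread has moved
    // the limit since we read old_limit; on failure the allocation loop
    // in allocate() simply re-reads the fresh value and retries.
    void update_soft_end(char* new_limit, char* old_limit) {
      char* expected = old_limit;
      soft_end.compare_exchange_strong(expected, new_limit);
    }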