--- old/src/share/vm/memory/defNewGeneration.cpp 2014-10-17 15:27:15.000000000 +0200
+++ new/src/share/vm/memory/defNewGeneration.cpp 2014-10-17 15:27:15.000000000 +0200
@@ -230,7 +230,7 @@
   compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
   update_counters();
-  _next_gen = NULL;
+  _old_gen = NULL;
   _tenuring_threshold = MaxTenuringThreshold;
   _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
@@ -387,8 +387,8 @@
   assert(next_level < gch->_n_gens, "DefNewGeneration cannot be an oldest gen");
-  Generation* next_gen = gch->get_gen(next_level);
-  size_t old_size = next_gen->capacity();
+  Generation* old_gen = gch->old_gen();
+  size_t old_size = old_gen->capacity();
   size_t new_size_before = _virtual_space.committed_size();
   size_t min_new_size = spec()->init_size();
   size_t max_new_size = reserved().byte_size();
@@ -572,7 +572,7 @@
   DefNewTracer gc_tracer;
   gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
-  _next_gen = gch->next_gen(this);
+  _old_gen = gch->old_gen();
   // If the next generation is too full to accommodate promotion
   // from this generation, pass on collection; let the next generation
@@ -692,7 +692,7 @@
     gch->set_incremental_collection_failed();
     // Inform the next generation that a promotion failure occurred.
-    _next_gen->promotion_failure_occurred();
+    _old_gen->promotion_failure_occurred();
     gc_tracer.report_promotion_failed(_promotion_failed_info);
     // Reset the PromotionFailureALot counters.
@@ -797,7 +797,7 @@
   // Otherwise try allocating obj tenured
   if (obj == NULL) {
-    obj = _next_gen->promote(old, s);
+    obj = _old_gen->promote(old, s);
     if (obj == NULL) {
       handle_promotion_failure(old);
      return old;
@@ -902,11 +902,11 @@
     }
     return false;
   }
-  if (_next_gen == NULL) {
+  if (_old_gen == NULL) {
     GenCollectedHeap* gch = GenCollectedHeap::heap();
-    _next_gen = gch->next_gen(this);
+    _old_gen = gch->old_gen();
   }
-  return _next_gen->promotion_attempt_is_safe(used());
+  return _old_gen->promotion_attempt_is_safe(used());
 }
 
 void DefNewGeneration::gc_epilogue(bool full) {
@@ -1026,8 +1026,7 @@
   return eden();
 }
 
-HeapWord* DefNewGeneration::allocate(size_t word_size,
-                                     bool is_tlab) {
+HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
   // This is the slow-path allocation for the DefNewGeneration.
   // Most allocations are fast-path in compiled code.
   // We try to allocate from the eden.  If that works, we are happy.
@@ -1035,17 +1034,17 @@
   // have to use it here, as well.
   HeapWord* result = eden()->par_allocate(word_size);
   if (result != NULL) {
-    if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
-      _next_gen->sample_eden_chunk();
+    if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
+      _old_gen->sample_eden_chunk();
     }
     return result;
   }
   do {
     HeapWord* old_limit = eden()->soft_end();
     if (old_limit < eden()->end()) {
-      // Tell the next generation we reached a limit.
+      // Tell the old generation we reached a limit.
       HeapWord* new_limit =
-        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
+        _old_gen->allocation_limit_reached(eden(), eden()->top(), word_size);
       if (new_limit != NULL) {
         Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
       } else {
@@ -1068,8 +1067,8 @@
   // circular dependency at compile time.
   if (result == NULL) {
     result = allocate_from_space(word_size);
-  } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
-    _next_gen->sample_eden_chunk();
+  } else if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
+    _old_gen->sample_eden_chunk();
   }
   return result;
 }
@@ -1077,8 +1076,8 @@
 HeapWord* DefNewGeneration::par_allocate(size_t word_size, bool is_tlab) {
   HeapWord* res = eden()->par_allocate(word_size);
-  if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
-    _next_gen->sample_eden_chunk();
+  if (CMSEdenChunksRecordAlways && _old_gen != NULL) {
+    _old_gen->sample_eden_chunk();
   }
   return res;
 }