--- old/src/share/vm/memory/genCollectedHeap.cpp	2014-10-17 16:28:42.000000000 +0200
+++ new/src/share/vm/memory/genCollectedHeap.cpp	2014-10-17 16:28:42.000000000 +0200
@@ -85,9 +85,6 @@
 jint GenCollectedHeap::initialize() {
   CollectedHeap::pre_initialize();
 
-  int i;
-  _n_gens = gen_policy()->number_of_generations();
-
   // While there are no constraints in the GC code that HeapWordSize
   // be any particular value, there are multiple other areas in the
   // system which believe this to be true (e.g. oop->object_size in some
@@ -95,16 +92,6 @@
   // HeapWordSize).
   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
 
-  // The heap must be at least as aligned as generations.
-  size_t gen_alignment = Generation::GenGrain;
-
-  _gen_specs = gen_policy()->generations();
-
-  // Make sure the sizes are all aligned.
-  for (i = 0; i < _n_gens; i++) {
-    _gen_specs[i]->align(gen_alignment);
-  }
-
   // Allocate space for the heap.
 
   char* heap_address;
@@ -130,11 +117,14 @@
 
   _gch = this;
 
-  for (i = 0; i < _n_gens; i++) {
-    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
-    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
-    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
-  }
+  ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
+  _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
+  heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());
+
+  ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
+  _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
+  heap_rs = heap_rs.last_part(gen_policy()->old_gen_spec()->max_size());
+
   clear_incremental_collection_failed();
 
 #if INCLUDE_ALL_GCS
@@ -149,7 +139,6 @@
   return JNI_OK;
 }
 
-
 char* GenCollectedHeap::allocate(size_t alignment,
                                  size_t* _total_reserved,
                                  int* _n_covered_regions,
@@ -158,24 +147,21 @@
     "the maximum representable size";
 
   // Now figure out the total size.
-  size_t total_reserved = 0;
-  int n_covered_regions = 0;
-  const size_t pageSize = UseLargePages ?
-      os::large_page_size() : os::vm_page_size();
-
+  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
   assert(alignment % pageSize == 0, "Must be");
 
-  for (int i = 0; i < _n_gens; i++) {
-    total_reserved += _gen_specs[i]->max_size();
-    if (total_reserved < _gen_specs[i]->max_size()) {
-      vm_exit_during_initialization(overflow_msg);
-    }
-    n_covered_regions += _gen_specs[i]->n_covered_regions();
+  size_t total_reserved = gen_policy()->young_gen_spec()->max_size() +
+                          gen_policy()->old_gen_spec()->max_size();
+  if (total_reserved < gen_policy()->young_gen_spec()->max_size() ||
+      total_reserved < gen_policy()->old_gen_spec()->max_size()) {
+    vm_exit_during_initialization(overflow_msg);
   }
   assert(total_reserved % alignment == 0,
          err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
                  SIZE_FORMAT, total_reserved, alignment));
 
+  int n_covered_regions = 2; // Young + Old
+
   // Needed until the cardtable is fixed to have the right number
   // of covered regions.
   n_covered_regions += 2;
@@ -187,64 +173,46 @@
   return heap_rs->base();
 }
 
-
 void GenCollectedHeap::post_initialize() {
   SharedHeap::post_initialize();
   GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
   guarantee(policy->is_generation_policy(), "Illegal policy type");
-  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
+  DefNewGeneration* def_new_gen = (DefNewGeneration*) _young_gen;
   assert(def_new_gen->kind() == Generation::DefNew ||
          def_new_gen->kind() == Generation::ParNew,
          "Wrong generation kind");
 
-  Generation* old_gen = get_gen(1);
-  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
-         old_gen->kind() == Generation::MarkSweepCompact,
+  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
+         _old_gen->kind() == Generation::MarkSweepCompact,
          "Wrong generation kind");
 
   policy->initialize_size_policy(def_new_gen->eden()->capacity(),
-                                 old_gen->capacity(),
+                                 _old_gen->capacity(),
                                  def_new_gen->from()->capacity());
   policy->initialize_gc_policy_counters();
 }
 
 void GenCollectedHeap::ref_processing_init() {
   SharedHeap::ref_processing_init();
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->ref_processor_init();
-  }
+  _young_gen->ref_processor_init();
+  _old_gen->ref_processor_init();
 }
 
 size_t GenCollectedHeap::capacity() const {
-  size_t res = 0;
-  for (int i = 0; i < _n_gens; i++) {
-    res += _gens[i]->capacity();
-  }
-  return res;
+  return _young_gen->capacity() + _old_gen->capacity();
 }
 
 size_t GenCollectedHeap::used() const {
-  size_t res = 0;
-  for (int i = 0; i < _n_gens; i++) {
-    res += _gens[i]->used();
-  }
-  return res;
+  return _young_gen->used() + _old_gen->used();
}
 
-// Save the "used_region" for generations level and lower.
-void GenCollectedHeap::save_used_regions(int level) {
-  assert(level < _n_gens, "Illegal level parameter");
-  for (int i = level; i >= 0; i--) {
-    _gens[i]->save_used_region();
-  }
+void GenCollectedHeap::save_used_regions() {
+  _old_gen->save_used_region();
+  _young_gen->save_used_region();
 }
 
 size_t GenCollectedHeap::max_capacity() const {
-  size_t res = 0;
-  for (int i = 0; i < _n_gens; i++) {
-    res += _gens[i]->max_capacity();
-  }
-  return res;
+  return _young_gen->max_capacity() + _old_gen->max_capacity();
 }
 
 // Update the _full_collections_completed counter
@@ -308,16 +276,20 @@
 HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                                bool is_tlab,
                                                bool first_only) {
-  HeapWord* res;
-  for (int i = 0; i < _n_gens; i++) {
-    if (_gens[i]->should_allocate(size, is_tlab)) {
-      res = _gens[i]->allocate(size, is_tlab);
-      if (res != NULL) return res;
-      else if (first_only) break;
+  HeapWord* res = NULL;
+
+  if (_young_gen->should_allocate(size, is_tlab)) {
+    res = _young_gen->allocate(size, is_tlab);
+    if (res != NULL || first_only) {
+      return res;
     }
   }
-  // Otherwise...
-  return NULL;
+
+  if (_old_gen->should_allocate(size, is_tlab)) {
+    res = _old_gen->allocate(size, is_tlab);
+  }
+
+  return res;
 }
 
 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
@@ -337,12 +309,115 @@
           (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
 }
 
-void GenCollectedHeap::do_collection(bool   full,
-                                     bool   clear_all_soft_refs,
-                                     size_t size,
-                                     bool   is_tlab,
-                                     int    max_level) {
-  bool prepared_for_verification = false;
+void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
+                                          bool is_tlab, bool run_verification, bool clear_soft_refs) {
+  // Timer for individual generations. Last argument is false: no CR
+  // FIXME: We should try to start the timing earlier to cover more of the GC pause
+  // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
+  // so we can assume here that the next GC id is what we want.
+  GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
+  TraceCollectorStats tcs(gen->counters());
+  TraceMemoryManagerStats tmms(gen->kind(), gc_cause());
+
+  size_t prev_used = gen->used();
+  gen->stat_record()->invocations++;
+  gen->stat_record()->accumulated_time.start();
+
+  // Must be done anew before each collection because
+  // a previous collection will do mangling and will
+  // change top of some spaces.
+  record_gen_tops_before_GC();
+
+  if (PrintGC && Verbose) {
+    // The level concept was removed along with the generations array,
+    // but this logging still prints 0/1. It could be changed to print
+    // young/old instead.
+    int level;
+    if (gen == GenCollectedHeap::heap()->young_gen()) {
+      level = 0;
+    } else {
+      level = 1;
+    }
+    gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
+                        level,
+                        gen->stat_record()->invocations,
+                        size * HeapWordSize);
+  }
+
+  if (run_verification && VerifyBeforeGC) {
+    HandleMark hm;  // Discard invalid handles created during verification
+    Universe::verify(" VerifyBeforeGC:");
+  }
+  COMPILER2_PRESENT(DerivedPointerTable::clear());
+
+  // Do collection work
+  {
+    // Note on ref discovery: For what appear to be historical reasons,
+    // GCH enables and disables (by enqueueing) refs discovery.
+    // In the future this should be moved into the generation's
+    // collect method so that ref discovery and enqueueing concerns
+    // are local to a generation. The collect method could return
+    // an appropriate indication in the case that notification on
+    // the ref lock was needed. This will make the treatment of
+    // weak refs more uniform (and indeed remove such concerns
+    // from GCH). XXX
+
+    HandleMark hm;  // Discard invalid handles created during gc
+    save_marks();   // save marks for all gens
+    // We want to discover references, but not process them yet.
+    // This mode is disabled in process_discovered_references if the
+    // generation does some collection work, or in
+    // enqueue_discovered_references if the generation returns
+    // without doing any work.
+    ReferenceProcessor* rp = gen->ref_processor();
+    // If the discovery of ("weak") refs in this generation is
+    // atomic wrt other collectors in this configuration, we
+    // are guaranteed to have empty discovered ref lists.
+    if (rp->discovery_is_atomic()) {
+      rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+      rp->setup_policy(clear_soft_refs);
+    } else {
+      // collect() below will enable discovery as appropriate
+    }
+    gen->collect(full, clear_soft_refs, size, is_tlab);
+    if (!rp->enqueuing_is_done()) {
+      rp->enqueue_discovered_references();
+    } else {
+      rp->set_enqueuing_is_done(false);
+    }
+    rp->verify_no_references_recorded();
+  }
+
+  // Determine if allocation request was met.
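+  // Note that size is a by-value parameter here, so the assignment below
+  // only records locally that the request was met; the caller's copy of
+  // size is unchanged.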
+  if (size > 0) {
+    if (!is_tlab || gen->supports_tlab_allocation()) {
+      if (size * HeapWordSize <= gen->unsafe_max_alloc_nogc()) {
+        size = 0;
+      }
+    }
+  }
+
+  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
+
+  gen->stat_record()->accumulated_time.stop();
+
+  update_gc_stats(gen, full);
+
+  if (run_verification && VerifyAfterGC) {
+    HandleMark hm;  // Discard invalid handles created during verification
+    Universe::verify(" VerifyAfterGC:");
+  }
+
+  if (PrintGCDetails) {
+    gclog_or_tty->print(":");
+    gen->print_heap_change(prev_used);
+  }
+}
+
+void GenCollectedHeap::do_collection(bool             full,
+                                     bool             clear_all_soft_refs,
+                                     size_t           size,
+                                     bool             is_tlab,
+                                     Generation::Type max_generation) {
   ResourceMark rm;
   DEBUG_ONLY(Thread* my_thread = Thread::current();)
@@ -353,7 +428,6 @@
   assert(Heap_lock->is_locked(),
          "the requesting thread should have the Heap_lock");
   guarantee(!is_gc_active(), "collection is not reentrant");
-  assert(max_level < n_gens(), "sanity check");
 
   if (GC_locker::check_active_before_gc()) {
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
@@ -371,7 +445,7 @@
   {
     FlagSetting fl(_is_gc_active, true);
 
-    bool complete = full && (max_level == (n_gens()-1));
+    bool complete = full && (max_generation == Generation::Old);
     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
@@ -383,147 +457,46 @@
     increment_total_collections(complete);
 
     size_t gch_prev_used = used();
+    bool must_restore_marks_for_biased_locking = false;
+    bool old_collected = false;
+    bool run_verification = total_collections() >= VerifyGCStartAt;
 
-    int starting_level = 0;
-    if (full) {
-      // Search for the oldest generation which will collect all younger
-      // generations, and start collection loop there.
-      for (int i = max_level; i >= 0; i--) {
-        if (_gens[i]->full_collects_younger_generations()) {
-          starting_level = i;
-          break;
-        }
+    if (_young_gen->performs_in_place_marking() ||
+        _old_gen->performs_in_place_marking()) {
+      // We want to avoid doing this for
+      // scavenge-only collections where it's unnecessary.
+      must_restore_marks_for_biased_locking = true;
+      BiasedLocking::preserve_marks();
+    }
+
+    bool prepared_for_verification = false;
+    if (!(full && _old_gen->full_collects_younger_generations()) &&
+        _young_gen->should_collect(full, size, is_tlab)) {
+      if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
+        prepare_for_verify();
+        prepared_for_verification = true;
       }
+      collect_generation(_young_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 0, do_clear_all_soft_refs);
     }
-
-    bool must_restore_marks_for_biased_locking = false;
-
-    int max_level_collected = starting_level;
-    for (int i = starting_level; i <= max_level; i++) {
-      if (_gens[i]->should_collect(full, size, is_tlab)) {
-        if (i == n_gens() - 1) { // a major collection is to happen
-          if (!complete) {
-            // The full_collections increment was missed above.
-            increment_total_full_collections();
-          }
-          pre_full_gc_dump(NULL); // do any pre full gc dumps
-        }
-        // Timer for individual generations. Last argument is false: no CR
-        // FIXME: We should try to start the timing earlier to cover more of the GC pause
-        // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
-        // so we can assume here that the next GC id is what we want.
-        GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek());
-        TraceCollectorStats tcs(_gens[i]->counters());
-        TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());
-
-        size_t prev_used = _gens[i]->used();
-        _gens[i]->stat_record()->invocations++;
-        _gens[i]->stat_record()->accumulated_time.start();
-
-        // Must be done anew before each collection because
-        // a previous collection will do mangling and will
-        // change top of some spaces.
-        record_gen_tops_before_GC();
-
-        if (PrintGC && Verbose) {
-          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
-                              i,
-                              _gens[i]->stat_record()->invocations,
-                              size*HeapWordSize);
-        }
-
-        if (VerifyBeforeGC && i >= VerifyGCLevel &&
-            total_collections() >= VerifyGCStartAt) {
-          HandleMark hm;  // Discard invalid handles created during verification
-          if (!prepared_for_verification) {
-            prepare_for_verify();
-            prepared_for_verification = true;
-          }
-          Universe::verify(" VerifyBeforeGC:");
-        }
-        COMPILER2_PRESENT(DerivedPointerTable::clear());
-
-        if (!must_restore_marks_for_biased_locking &&
-            _gens[i]->performs_in_place_marking()) {
-          // We perform this mark word preservation work lazily
-          // because it's only at this point that we know whether we
-          // absolutely have to do it; we want to avoid doing it for
-          // scavenge-only collections where it's unnecessary
-          must_restore_marks_for_biased_locking = true;
-          BiasedLocking::preserve_marks();
-        }
-
-        // Do collection work
-        {
-          // Note on ref discovery: For what appear to be historical reasons,
-          // GCH enables and disabled (by enqueing) refs discovery.
-          // In the future this should be moved into the generation's
-          // collect method so that ref discovery and enqueueing concerns
-          // are local to a generation. The collect method could return
-          // an appropriate indication in the case that notification on
-          // the ref lock was needed. This will make the treatment of
-          // weak refs more uniform (and indeed remove such concerns
-          // from GCH). XXX
-
-          HandleMark hm;  // Discard invalid handles created during gc
-          save_marks();   // save marks for all gens
-          // We want to discover references, but not process them yet.
-          // This mode is disabled in process_discovered_references if the
-          // generation does some collection work, or in
-          // enqueue_discovered_references if the generation returns
-          // without doing any work.
-          ReferenceProcessor* rp = _gens[i]->ref_processor();
-          // If the discovery of ("weak") refs in this generation is
-          // atomic wrt other collectors in this configuration, we
-          // are guaranteed to have empty discovered ref lists.
-          if (rp->discovery_is_atomic()) {
-            rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
-            rp->setup_policy(do_clear_all_soft_refs);
-          } else {
-            // collect() below will enable discovery as appropriate
-          }
-          _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
-          if (!rp->enqueuing_is_done()) {
-            rp->enqueue_discovered_references();
-          } else {
-            rp->set_enqueuing_is_done(false);
-          }
-          rp->verify_no_references_recorded();
-        }
-        max_level_collected = i;
-
-        // Determine if allocation request was met.
-        if (size > 0) {
-          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
-            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
-              size = 0;
-            }
-          }
-        }
-
-        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
-
-        _gens[i]->stat_record()->accumulated_time.stop();
-
-        update_gc_stats(i, full);
-
-        if (VerifyAfterGC && i >= VerifyGCLevel &&
-            total_collections() >= VerifyGCStartAt) {
-          HandleMark hm;  // Discard invalid handles created during verification
-          Universe::verify(" VerifyAfterGC:");
-        }
-
-        if (PrintGCDetails) {
-          gclog_or_tty->print(":");
-          _gens[i]->print_heap_change(prev_used);
+    if (max_generation == Generation::Old && _old_gen->should_collect(full, size, is_tlab)) {
+      if (!complete) {
+        // The full_collections increment was missed above.
+        increment_total_full_collections();
+      }
+      pre_full_gc_dump(NULL); // do any pre full gc dumps
+      if (run_verification && VerifyGCLevel <= 1 && VerifyBeforeGC) {
+        if (!prepared_for_verification) {
+          prepare_for_verify();
         }
       }
+      collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs);
+      old_collected = true;
     }
 
     // Update "complete" boolean wrt what actually transpired --
     //  for instance, a promotion failure could have led to
     //  a whole heap collection.
-    complete = complete || (max_level_collected == n_gens() - 1);
+    complete = complete || old_collected;
 
     if (complete) { // We did a "major" collection
       // FIXME: See comment at pre_full_gc_dump call
@@ -539,10 +512,11 @@
       }
     }
 
-    for (int j = max_level_collected; j >= 0; j -= 1) {
-      // Adjust generation sizes.
-      _gens[j]->compute_new_size();
+    // Adjust generation sizes.
+    if (old_collected) {
+      _old_gen->compute_new_size();
     }
+    _young_gen->compute_new_size();
 
     if (complete) {
       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
@@ -580,7 +554,7 @@
 }
 
 void GenCollectedHeap::
-gen_process_roots(int level,
+gen_process_roots(Generation::Type type,
                   bool younger_gens_as_roots,
                   bool activate_scope,
                   SharedHeap::ScanningOption so,
@@ -599,18 +573,18 @@
 
   if (younger_gens_as_roots) {
     if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
-      for (int i = 0; i < level; i++) {
-        not_older_gens->set_generation(_gens[i]);
-        _gens[i]->oop_iterate(not_older_gens);
+      if (type == Generation::Old) {
+        not_older_gens->set_generation(_young_gen);
+        _young_gen->oop_iterate(not_older_gens);
       }
       not_older_gens->reset_generation();
    }
  }
 
   // When collection is parallel, all threads get to cooperate to do
-  // older-gen scanning.
-  for (int i = level+1; i < _n_gens; i++) {
-    older_gens->set_generation(_gens[i]);
-    rem_set()->younger_refs_iterate(_gens[i], older_gens);
+  // old generation scanning.
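+  // This scan is only needed when collecting the young generation:
+  // the remembered set tracks pointers from the old generation into
+  // the young generation.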
+  if (type == Generation::Young) {
+    older_gens->set_generation(_old_gen);
+    rem_set()->younger_refs_iterate(_old_gen, older_gens);
     older_gens->reset_generation();
   }
 
@@ -618,7 +592,7 @@
 }
 
 void GenCollectedHeap::
-gen_process_roots(int level,
+gen_process_roots(Generation::Type type,
                   bool younger_gens_as_roots,
                   bool activate_scope,
                   SharedHeap::ScanningOption so,
@@ -630,7 +604,7 @@
   const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
 
   bool is_moving_collection = false;
-  if (level == 0 || is_adjust_phase) {
+  if (type == Generation::Young || is_adjust_phase) {
     // young collections are always moving
     is_moving_collection = true;
   }
@@ -638,7 +612,7 @@
   MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
   CodeBlobClosure* code_closure = &mark_code_closure;
 
-  gen_process_roots(level,
+  gen_process_roots(type,
                     younger_gens_as_roots,
                     activate_scope, so,
                     not_older_gens, only_strong_roots ? NULL : not_older_gens,
@@ -651,19 +625,20 @@
 
 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
   SharedHeap::process_weak_roots(root_closure);
   // "Local" "weak" refs
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->ref_processor()->weak_oops_do(root_closure);
-  }
+  _young_gen->ref_processor()->weak_oops_do(root_closure);
+  _old_gen->ref_processor()->weak_oops_do(root_closure);
 }
 
 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
 void GenCollectedHeap::                                                 \
-oop_since_save_marks_iterate(int level,                                 \
+oop_since_save_marks_iterate(Generation::Type gen,                      \
                              OopClosureType* cur,                       \
                              OopClosureType* older) {                   \
-  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
-  for (int i = level+1; i < n_gens(); i++) {                            \
-    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
+  if (gen == Generation::Young) {                                       \
+    _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
+    _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
+  } else {                                                              \
+    _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
  }                                                                      \
 }
 
@@ -671,23 +646,21 @@
 
 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
 
-bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
-  for (int i = level; i < _n_gens; i++) {
-    if (!_gens[i]->no_allocs_since_save_marks()) return false;
-  }
-  return true;
+bool GenCollectedHeap::no_allocs_since_save_marks(bool include_young) {
+  return (!include_young || _young_gen->no_allocs_since_save_marks()) &&
+         _old_gen->no_allocs_since_save_marks();
 }
 
 bool GenCollectedHeap::supports_inline_contig_alloc() const {
-  return _gens[0]->supports_inline_contig_alloc();
+  return _young_gen->supports_inline_contig_alloc();
 }
 
 HeapWord** GenCollectedHeap::top_addr() const {
-  return _gens[0]->top_addr();
+  return _young_gen->top_addr();
 }
 
 HeapWord** GenCollectedHeap::end_addr() const {
-  return _gens[0]->end_addr();
+  return _young_gen->end_addr();
 }
 
 // public collection interfaces
@@ -702,47 +675,47 @@
 #endif // INCLUDE_ALL_GCS
   } else if (cause == GCCause::_wb_young_gc) {
     // minor collection for WhiteBox API
-    collect(cause, 0);
+    collect(cause, Generation::Young);
   } else {
 #ifdef ASSERT
-    if (cause == GCCause::_scavenge_alot) {
-      // minor collection only
-      collect(cause, 0);
-    } else {
-      // Stop-the-world full collection
-      collect(cause, n_gens() - 1);
-    }
+    if (cause == GCCause::_scavenge_alot) {
+      // minor collection only
+      collect(cause, Generation::Young);
+    } else {
+      // Stop-the-world full collection
+      collect(cause, Generation::Old);
+    }
 #else
     // Stop-the-world full collection
-    collect(cause, n_gens() - 1);
+    collect(cause, Generation::Old);
 #endif
   }
 }
 
-void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
+void GenCollectedHeap::collect(GCCause::Cause cause, Generation::Type max_gen) {
   // The caller doesn't have the Heap_lock
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
   MutexLocker ml(Heap_lock);
-  collect_locked(cause, max_level);
+  collect_locked(cause, max_gen);
 }
 
 void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
   // The caller has the Heap_lock
   assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
-  collect_locked(cause, n_gens() - 1);
+  collect_locked(cause, Generation::Old);
 }
 
 // this is the private collection interface
 // The Heap_lock is expected to be held on entry.
 
-void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
+void GenCollectedHeap::collect_locked(GCCause::Cause cause, Generation::Type max_generation) {
   // Read the GC count while holding the Heap_lock
   unsigned int gc_count_before      = total_collections();
   unsigned int full_gc_count_before = total_full_collections();
   {
     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
     VM_GenCollectFull op(gc_count_before, full_gc_count_before,
-                         cause, max_level);
+                         cause, max_generation);
     VMThread::execute(&op);
   }
 }
 
@@ -750,12 +723,12 @@
 #if INCLUDE_ALL_GCS
 bool GenCollectedHeap::create_cms_collector() {
 
-  assert(_gens[1]->kind() == Generation::ConcurrentMarkSweep,
+  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
   // Skip two header words in the block content verification
   NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
   CMSCollector* collector = new CMSCollector(
-    (ConcurrentMarkSweepGeneration*)_gens[1],
+    (ConcurrentMarkSweepGeneration*)_old_gen,
     _rem_set->as_CardTableRS(),
     (ConcurrentMarkSweepPolicy*) collector_policy());
 
@@ -785,28 +758,28 @@
 #endif // INCLUDE_ALL_GCS
 
 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
-  do_full_collection(clear_all_soft_refs, _n_gens - 1);
+  do_full_collection(clear_all_soft_refs, Generation::Old);
 }
 
 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
-                                          int max_level) {
-  int local_max_level;
+                                          Generation::Type max_gen) {
+  Generation::Type local_max_gen;
   if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
-    local_max_level = 0;
+    local_max_gen = Generation::Young;
  } else {
-    local_max_level = max_level;
+    local_max_gen = max_gen;
  }
 
   do_collection(true                 /* full */,
                 clear_all_soft_refs  /* clear_all_soft_refs */,
                 0                    /* size */,
                 false                /* is_tlab */,
-                local_max_level      /* max_level */);
+                local_max_gen        /* max_gen */);
 
   // Hack XXX FIX ME !!!
   // A scavenge may not have been attempted, or may have
   // been attempted and failed, because the old gen was too full
-  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
+  if (local_max_gen == Generation::Young && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
@@ -817,13 +790,13 @@
                   clear_all_soft_refs  /* clear_all_soft_refs */,
                   0                    /* size */,
                   false                /* is_tlab */,
-                  n_gens() - 1         /* max_level */);
+                  Generation::Old      /* max_gen */);
   }
 }
 
 bool GenCollectedHeap::is_in_young(oop p) {
-  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
-  assert(result == _gens[0]->is_in_reserved(p),
+  bool result = ((HeapWord*)p) < _old_gen->reserved().start();
+  assert(result == _young_gen->is_in_reserved(p),
        err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
   return result;
 }
@@ -843,8 +816,8 @@
 #endif
   // This might be sped up with a cache of the last generation that
   // answered yes.
-  for (int i = 0; i < _n_gens; i++) {
-    if (_gens[i]->is_in(p)) return true;
+  if (_young_gen->is_in(p) || _old_gen->is_in(p)) {
+    return true;
   }
   // Otherwise...
   return false;
@@ -856,114 +829,97 @@
 bool GenCollectedHeap::is_in_partial_collection(const void* p) {
   assert(is_in_reserved(p) || p == NULL,
     "Does not work if address is non-null and outside of the heap");
-  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
+  return p < _young_gen->reserved().end() && p != NULL;
 }
 #endif
 
 void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->oop_iterate(cl);
-  }
+  _young_gen->oop_iterate(cl);
+  _old_gen->oop_iterate(cl);
 }
 
 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->object_iterate(cl);
-  }
+  _young_gen->object_iterate(cl);
+  _old_gen->object_iterate(cl);
 }
 
 void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->safe_object_iterate(cl);
-  }
+  _young_gen->safe_object_iterate(cl);
+  _old_gen->safe_object_iterate(cl);
 }
 
 Space* GenCollectedHeap::space_containing(const void* addr) const {
-  for (int i = 0; i < _n_gens; i++) {
-    Space* res = _gens[i]->space_containing(addr);
-    if (res != NULL) return res;
+  Space* res = _young_gen->space_containing(addr);
+  if (res != NULL) {
+    return res;
   }
-  // Otherwise...
-  assert(false, "Could not find containing space");
-  return NULL;
+  res = _old_gen->space_containing(addr);
+  assert(res != NULL, "Could not find containing space");
+  return res;
 }
 
-
 HeapWord* GenCollectedHeap::block_start(const void* addr) const {
   assert(is_in_reserved(addr), "block_start of address outside of heap");
-  for (int i = 0; i < _n_gens; i++) {
-    if (_gens[i]->is_in_reserved(addr)) {
-      assert(_gens[i]->is_in(addr),
-             "addr should be in allocated part of generation");
-      return _gens[i]->block_start(addr);
-    }
+  if (_young_gen->is_in_reserved(addr)) {
+    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
+    return _young_gen->block_start(addr);
   }
-  assert(false, "Some generation should contain the address");
-  return NULL;
+
+  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
+  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
+  return _old_gen->block_start(addr);
 }
 
 size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
   assert(is_in_reserved(addr), "block_size of address outside of heap");
-  for (int i = 0; i < _n_gens; i++) {
-    if (_gens[i]->is_in_reserved(addr)) {
-      assert(_gens[i]->is_in(addr),
-             "addr should be in allocated part of generation");
-      return _gens[i]->block_size(addr);
-    }
+  if (_young_gen->is_in_reserved(addr)) {
+    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
+    return _young_gen->block_size(addr);
  }
-  assert(false, "Some generation should contain the address");
-  return 0;
+
+  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
+  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
+  return _old_gen->block_size(addr);
 }
 
 bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
   assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
   assert(block_start(addr) == addr, "addr must be a block start");
-  for (int i = 0; i < _n_gens; i++) {
-    if (_gens[i]->is_in_reserved(addr)) {
-      return _gens[i]->block_is_obj(addr);
-    }
+  if (_young_gen->is_in_reserved(addr)) {
+    return _young_gen->block_is_obj(addr);
  }
-  assert(false, "Some generation should contain the address");
-  return false;
+
+  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
+  return _old_gen->block_is_obj(addr);
 }
 
 bool GenCollectedHeap::supports_tlab_allocation() const {
-  for (int i = 0; i < _n_gens; i += 1) {
-    if (_gens[i]->supports_tlab_allocation()) {
-      return true;
-    }
-  }
-  return false;
+  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
+  return _young_gen->supports_tlab_allocation();
 }
 
 size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
-  size_t result = 0;
-  for (int i = 0; i < _n_gens; i += 1) {
-    if (_gens[i]->supports_tlab_allocation()) {
-      result += _gens[i]->tlab_capacity();
-    }
+  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
+  if (_young_gen->supports_tlab_allocation()) {
+    return _young_gen->tlab_capacity();
  }
-  return result;
+  return 0;
 }
 
 size_t GenCollectedHeap::tlab_used(Thread* thr) const {
-  size_t result = 0;
-  for (int i = 0; i < _n_gens; i += 1) {
-    if (_gens[i]->supports_tlab_allocation()) {
-      result += _gens[i]->tlab_used();
-    }
+  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  if (_young_gen->supports_tlab_allocation()) {
+    return _young_gen->tlab_used();
  }
-  return result;
+  return 0;
 }
 
 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
-  size_t result = 0;
-  for (int i = 0; i < _n_gens; i += 1) {
-    if (_gens[i]->supports_tlab_allocation()) {
-      result += _gens[i]->unsafe_max_tlab_alloc();
-    }
+  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
+  if (_young_gen->supports_tlab_allocation()) {
+    return _young_gen->unsafe_max_tlab_alloc();
  }
-  return result;
+  return 0;
 }
 
 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
@@ -1012,17 +968,15 @@
 ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                                size_t max_alloc_words) {
   ScratchBlock* res = NULL;
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
-  }
+  _young_gen->contribute_scratch(res, requestor, max_alloc_words);
+  _old_gen->contribute_scratch(res, requestor, max_alloc_words);
   sort_scratch_list(res);
   return res;
 }
 
 void GenCollectedHeap::release_scratch() {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->reset_scratch();
-  }
+  _young_gen->reset_scratch();
+  _old_gen->reset_scratch();
 }
 
 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
@@ -1037,39 +991,29 @@
   generation_iterate(&blk, false);
 }
 
-
 void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                           bool old_to_young) {
   if (old_to_young) {
-    for (int i = _n_gens-1; i >= 0; i--) {
-      cl->do_generation(_gens[i]);
-    }
+    cl->do_generation(_old_gen);
+    cl->do_generation(_young_gen);
  } else {
-    for (int i = 0; i < _n_gens; i++) {
-      cl->do_generation(_gens[i]);
-    }
+    cl->do_generation(_young_gen);
+    cl->do_generation(_old_gen);
  }
 }
 
 void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->space_iterate(cl, true);
-  }
+  _young_gen->space_iterate(cl, true);
+  _old_gen->space_iterate(cl, true);
 }
 
 bool GenCollectedHeap::is_maximal_no_gc() const {
-  for (int i = 0; i < _n_gens; i++) {
-    if (!_gens[i]->is_maximal_no_gc()) {
-      return false;
-    }
-  }
-  return true;
+  return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
 }
 
 void GenCollectedHeap::save_marks() {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->save_marks();
-  }
+  _young_gen->save_marks();
  _old_gen->save_marks();
 }
 
 GenCollectedHeap* GenCollectedHeap::heap() {
@@ -1078,30 +1022,26 @@
   return _gch;
 }
 
-
 void GenCollectedHeap::prepare_for_compaction() {
-  guarantee(_n_gens = 2, "Wrong number of generations");
-  Generation* old_gen = _gens[1];
   // Start by compacting into same gen.
-  CompactPoint cp(old_gen);
-  old_gen->prepare_for_compaction(&cp);
-  Generation* young_gen = _gens[0];
-  young_gen->prepare_for_compaction(&cp);
-}
-
-GCStats* GenCollectedHeap::gc_stats(int level) const {
-  return _gens[level]->gc_stats();
+  CompactPoint cp(_old_gen);
+  _old_gen->prepare_for_compaction(&cp);
+  _young_gen->prepare_for_compaction(&cp);
 }
 
 void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
-  for (int i = _n_gens-1; i >= 0; i--) {
-    Generation* g = _gens[i];
-    if (!silent) {
-      gclog_or_tty->print("%s", g->name());
-      gclog_or_tty->print(" ");
-    }
-    g->verify();
+  if (!silent) {
+    gclog_or_tty->print("%s", _old_gen->name());
+    gclog_or_tty->print(" ");
  }
+  _old_gen->verify();
+
+  if (!silent) {
+    gclog_or_tty->print("%s", _young_gen->name());
+    gclog_or_tty->print(" ");
+  }
+  _young_gen->verify();
+
   if (!silent) {
     gclog_or_tty->print("remset ");
   }
@@ -1109,9 +1049,8 @@
 }
 
 void GenCollectedHeap::print_on(outputStream* st) const {
-  for (int i = 0; i < _n_gens; i++) {
-    _gens[i]->print_on(st);
-  }
+  _young_gen->print_on(st);
+  _old_gen->print_on(st);
   MetaspaceAux::print_on(st);
 }
 
@@ -1150,10 +1089,10 @@
 
 void GenCollectedHeap::print_tracing_info() const {
   if (TraceYoungGenTime) {
-    get_gen(0)->print_summary_info();
+    _young_gen->print_summary_info();
  }
   if (TraceOldGenTime) {
-    get_gen(1)->print_summary_info();
+    _old_gen->print_summary_info();
  }
 }
 
@@ -1259,7 +1198,7 @@
 oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                               oop obj,
                                               size_t obj_size) {
-  guarantee(old_gen->level() == 1, "We only get here with an old generation");
+  guarantee(old_gen == _old_gen, "We only get here with an old generation");
   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
   HeapWord* result = NULL;