
src/share/vm/gc/shared/genCollectedHeap.cpp

rev 8393 : 8077842: Remove the level parameter passed around in GenCollectedHeap
Reviewed-by:

*** 125,139 ****
  _rem_set = collector_policy()->create_rem_set(reserved_region());
  set_barrier_set(rem_set()->bs());

  ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
! _young_gen = gen_policy()->young_gen_spec()->init(young_rs, 0, rem_set());
  heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());

  ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
! _old_gen = gen_policy()->old_gen_spec()->init(old_rs, 1, rem_set());
  clear_incremental_collection_failed();

#if INCLUDE_ALL_GCS
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
--- 125,139 ----
  _rem_set = collector_policy()->create_rem_set(reserved_region());
  set_barrier_set(rem_set()->bs());

  ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
! _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
  heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());

  ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
! _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
  clear_incremental_collection_failed();

#if INCLUDE_ALL_GCS
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
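Note: the two-argument init calls above assume a matching declaration change in generationSpec.hpp, which is not part of this page. A rough sketch of the assumed shape, for orientation only:

    // generationSpec.hpp -- assumed shape after this change (the real hunk is in a separate file)
    //   was: Generation* init(ReservedSpace rs, int level, GenRemSet* remset);
    Generation* init(ReservedSpace rs, GenRemSet* remset);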
*** 200,215 ****
size_t GenCollectedHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

! // Save the "used_region" for generations level and lower.
! void GenCollectedHeap::save_used_regions(int level) {
!   assert(level == 0 || level == 1, "Illegal level parameter");
!   if (level == 1) {
!     _old_gen->save_used_region();
!   }
  _young_gen->save_used_region();
}

size_t GenCollectedHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
--- 200,211 ----
size_t GenCollectedHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

! void GenCollectedHeap::save_used_regions() {
!   _old_gen->save_used_region();
  _young_gen->save_used_region();
}

size_t GenCollectedHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
*** 328,339 ****
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  if (PrintGC && Verbose) {
!   gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
!                       gen->level(),
                        gen->stat_record()->invocations,
                        size * HeapWordSize);
  }

  if (run_verification && VerifyBeforeGC) {
--- 324,343 ----
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  if (PrintGC && Verbose) {
!   // I didn't want to change the logging when removing the level concept,
!   // but I guess this logging could say young/old or something instead of 0/1.
!   uint level;
!   if (gen == GenCollectedHeap::heap()->young_gen()) {
!     level = 0;
!   } else {
!     level = 1;
!   }
!   gclog_or_tty->print("level=%u invoke=%d size=" SIZE_FORMAT,
!                       level,
                        gen->stat_record()->invocations,
                        size * HeapWordSize);
  }

  if (run_verification && VerifyBeforeGC) {
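The new comment above keeps the numeric 0/1 in the log output for compatibility; if the message were changed to name the generation instead, a hypothetical variant (not part of this patch) could look like:

    // Hypothetical alternative: print a generation name rather than a numeric level.
    const char* gen_name =
      (gen == GenCollectedHeap::heap()->young_gen()) ? "young" : "old";
    gclog_or_tty->print("gen=%s invoke=%d size=" SIZE_FORMAT,
                        gen_name, gen->stat_record()->invocations, size * HeapWordSize);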
*** 390,400 ****
  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

! update_gc_stats(gen->level(), full);

  if (run_verification && VerifyAfterGC) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }
--- 394,404 ----
  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

! update_gc_stats(gen, full);

  if (run_verification && VerifyAfterGC) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }
*** 403,417 ****
    gclog_or_tty->print(":");
    gen->print_heap_change(prev_used);
  }
}

! void GenCollectedHeap::do_collection(bool   full,
!                                      bool   clear_all_soft_refs,
!                                      size_t size,
!                                      bool   is_tlab,
!                                      int    max_level) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
--- 407,421 ----
    gclog_or_tty->print(":");
    gen->print_heap_change(prev_used);
  }
}

! void GenCollectedHeap::do_collection(bool             full,
!                                      bool             clear_all_soft_refs,
!                                      size_t           size,
!                                      bool             is_tlab,
!                                      Generation::Type max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
*** 435,445 ****
  print_heap_before_gc();

  {
    FlagSetting fl(_is_gc_active, true);

!   bool complete = full && (max_level == 1 /* old */);
    const char* gc_cause_prefix = complete ? "Full GC" : "GC";
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
    // so we can assume here that the next GC id is what we want.
    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
--- 439,449 ----
  print_heap_before_gc();

  {
    FlagSetting fl(_is_gc_active, true);

!   bool complete = full && (max_generation == Generation::Old);
    const char* gc_cause_prefix = complete ? "Full GC" : "GC";
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
    // so we can assume here that the next GC id is what we want.
    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
*** 449,461 ****
    size_t gch_prev_used = used();
    bool run_verification = total_collections() >= VerifyGCStartAt;
    bool prepared_for_verification = false;

!   int max_level_collected = 0;
!   bool old_collects_young = (max_level == 1) &&
!                             full && _old_gen->full_collects_younger_generations();

    if (!old_collects_young &&
        _young_gen->should_collect(full, size, is_tlab)) {
      if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
        prepare_for_verify();
--- 453,464 ----
    size_t gch_prev_used = used();
    bool run_verification = total_collections() >= VerifyGCStartAt;
    bool prepared_for_verification = false;

!   bool collected_old = false;
!   bool old_collects_young = complete && _old_gen->full_collects_younger_generations();

    if (!old_collects_young &&
        _young_gen->should_collect(full, size, is_tlab)) {
      if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
        prepare_for_verify();
*** 478,488 ****
      }
    }

    bool must_restore_marks_for_biased_locking = false;

!   if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) {
      if (!complete) {
        // The full_collections increment was missed above.
        increment_total_full_collections();
      }
--- 481,491 ----
      }
    }

    bool must_restore_marks_for_biased_locking = false;

!   if (max_generation == Generation::Old && _old_gen->should_collect(full, size, is_tlab)) {
      if (!complete) {
        // The full_collections increment was missed above.
        increment_total_full_collections();
      }
*** 501,517 ****
                       run_verification && VerifyGCLevel <= 1,
                       do_clear_all_soft_refs,
                       true);

      must_restore_marks_for_biased_locking = true;
!     max_level_collected = 1;
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
!   complete = complete || (max_level_collected == 1 /* old */);

    if (complete) { // We did a "major" collection
      // FIXME: See comment at pre_full_gc_dump call
      post_full_gc_dump(NULL);   // do any post full gc dumps
    }
--- 504,520 ----
                       run_verification && VerifyGCLevel <= 1,
                       do_clear_all_soft_refs,
                       true);

      must_restore_marks_for_biased_locking = true;
!     collected_old = true;
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
!   complete = complete || collected_old;

    if (complete) { // We did a "major" collection
      // FIXME: See comment at pre_full_gc_dump call
      post_full_gc_dump(NULL);   // do any post full gc dumps
    }
*** 524,534 ****
      MetaspaceAux::print_metaspace_change(metadata_prev_used);
    }
  }

  // Adjust generation sizes.
! if (max_level_collected == 1 /* old */) {
    _old_gen->compute_new_size();
  }
  _young_gen->compute_new_size();

  if (complete) {
--- 527,537 ----
      MetaspaceAux::print_metaspace_change(metadata_prev_used);
    }
  }

  // Adjust generation sizes.
! if (collected_old) {
    _old_gen->compute_new_size();
  }
  _young_gen->compute_new_size();

  if (complete) {
*** 667,688 ****
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }
}

! void GenCollectedHeap::gen_process_roots(int level,
                                           bool younger_gens_as_roots,
                                           bool activate_scope,
                                           ScanningOption so,
                                           bool only_strong_roots,
                                           OopsInGenClosure* not_older_gens,
                                           OopsInGenClosure* older_gens,
                                           CLDClosure* cld_closure) {
  const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;

  bool is_moving_collection = false;
! if (level == 0 || is_adjust_phase) {
    // young collections are always moving
    is_moving_collection = true;
  }

  MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
--- 670,691 ----
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }
}

! void GenCollectedHeap::gen_process_roots(Generation::Type type,
                                           bool younger_gens_as_roots,
                                           bool activate_scope,
                                           ScanningOption so,
                                           bool only_strong_roots,
                                           OopsInGenClosure* not_older_gens,
                                           OopsInGenClosure* older_gens,
                                           CLDClosure* cld_closure) {
  const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;

  bool is_moving_collection = false;
! if (type == Generation::Young || is_adjust_phase) {
    // young collections are always moving
    is_moving_collection = true;
  }

  MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
*** 694,713 ****
                    cld_closure, weak_cld_closure, &mark_code_closure);

  if (younger_gens_as_roots) {
    if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
!     if (level == 1) {
        not_older_gens->set_generation(_young_gen);
        _young_gen->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }

  // When collection is parallel, all threads get to cooperate to do
! // older-gen scanning.
! if (level == 0) {
    older_gens->set_generation(_old_gen);
    rem_set()->younger_refs_iterate(_old_gen, older_gens);
    older_gens->reset_generation();
  }
--- 697,716 ----
                    cld_closure, weak_cld_closure, &mark_code_closure);

  if (younger_gens_as_roots) {
    if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
!     if (type == Generation::Old) {
        not_older_gens->set_generation(_young_gen);
        _young_gen->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }

  // When collection is parallel, all threads get to cooperate to do
! // old generation scanning.
! if (type == Generation::Young) {
    older_gens->set_generation(_old_gen);
    rem_set()->younger_refs_iterate(_old_gen, older_gens);
    older_gens->reset_generation();
  }
*** 727,740 ****
  _old_gen->ref_processor()->weak_oops_do(root_closure);
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
! oop_since_save_marks_iterate(int level,                                \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
!   if (level == 0) {                                                    \
    _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
    _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
  } else {                                                              \
    _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
  }                                                                     \
--- 730,743 ----
  _old_gen->ref_processor()->weak_oops_do(root_closure);
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
! oop_since_save_marks_iterate(Generation::Type gen,                     \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
!   if (gen == Generation::Young) {                                      \
    _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
    _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
  } else {                                                              \
    _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
  }                                                                     \
*** 742,753 ****
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

! bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
!   if (level == 0 && !_young_gen->no_allocs_since_save_marks()) {
    return false;
  }
  return _old_gen->no_allocs_since_save_marks();
}
--- 745,756 ----
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

! bool GenCollectedHeap::no_allocs_since_save_marks(bool include_young) {
!   if (include_young && !_young_gen->no_allocs_since_save_marks()) {
    return false;
  }
  return _old_gen->no_allocs_since_save_marks();
}
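With the level parameter gone, callers state directly whether the young generation should be checked. A sketch of the call-site translation (the variable name gch is illustrative, not from this webrev):

    // Formerly no_allocs_since_save_marks(0 /* level */), i.e. check both generations:
    bool quiescent = gch->no_allocs_since_save_marks(true /* include_young */);
    // Formerly level 1, i.e. check only the old generation:
    bool old_only  = gch->no_allocs_since_save_marks(false /* include_young */);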
*** 773,823 ****
#else // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else if (cause == GCCause::_wb_young_gc) {
    // minor collection for WhiteBox API
!   collect(cause, 0 /* young */);
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
!     collect(cause, 0 /* young */);
    } else {
      // Stop-the-world full collection
!     collect(cause, 1 /* old */);
    }
#else
    // Stop-the-world full collection
!   collect(cause, 1 /* old */);
#endif
  }
}

! void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
! collect_locked(cause, max_level);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
! collect_locked(cause, 1 /* old */);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.
! void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
!                        cause, max_level);
    VMThread::execute(&op);
  }
}

#if INCLUDE_ALL_GCS
--- 776,826 ----
#else // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else if (cause == GCCause::_wb_young_gc) {
    // minor collection for WhiteBox API
!   collect(cause, Generation::Young);
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
!     collect(cause, Generation::Young);
    } else {
      // Stop-the-world full collection
!     collect(cause, Generation::Old);
    }
#else
    // Stop-the-world full collection
!   collect(cause, Generation::Old);
#endif
  }
}

! void GenCollectedHeap::collect(GCCause::Cause cause, Generation::Type max_generation) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
! collect_locked(cause, max_generation);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
! collect_locked(cause, Generation::Old);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.
! void GenCollectedHeap::collect_locked(GCCause::Cause cause, Generation::Type max_generation) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
!                        cause, max_generation);
    VMThread::execute(&op);
  }
}

#if INCLUDE_ALL_GCS
*** 856,898 ****
  }
}
#endif // INCLUDE_ALL_GCS

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
!  do_full_collection(clear_all_soft_refs, 1 /* old */);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
!                                          int max_level) {
!  int local_max_level;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
!    local_max_level = 0;
  } else {
!    local_max_level = max_level;
  }

!  do_collection(true                /* full */,
!                clear_all_soft_refs /* clear_all_soft_refs */,
!                0                   /* size */,
!                false               /* is_tlab */,
!                local_max_level     /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
!  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                /* full */,
                  clear_all_soft_refs /* clear_all_soft_refs */,
                  0                   /* size */,
                  false               /* is_tlab */,
!                 1 /* old */         /* max_level */);
  }
}

bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _old_gen->reserved().start();
--- 859,901 ----
  }
}
#endif // INCLUDE_ALL_GCS

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
!  do_full_collection(clear_all_soft_refs, Generation::Old);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
!                                          Generation::Type last_generation) {
!  Generation::Type local_last_generation;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
!    local_last_generation = Generation::Young;
  } else {
!    local_last_generation = last_generation;
  }

!  do_collection(true                  /* full */,
!                clear_all_soft_refs   /* clear_all_soft_refs */,
!                0                     /* size */,
!                false                 /* is_tlab */,
!                local_last_generation /* last_generation */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
!  if (local_last_generation == Generation::Young && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                /* full */,
                  clear_all_soft_refs /* clear_all_soft_refs */,
                  0                   /* size */,
                  false               /* is_tlab */,
!                 Generation::Old     /* last_generation */);
  }
}

bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _old_gen->reserved().start();
*** 1111,1126 ****
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}

! GCStats* GenCollectedHeap::gc_stats(int level) const {
!   if (level == 0) {
!     return _young_gen->gc_stats();
!   } else {
!     return _old_gen->gc_stats();
!   }
}

void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
  if (!silent) {
    gclog_or_tty->print("%s", _old_gen->name());
--- 1114,1125 ----
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}

! GCStats* GenCollectedHeap::gc_stats(Generation* gen) const {
!   return gen->gc_stats();
}

void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
  if (!silent) {
    gclog_or_tty->print("%s", _old_gen->name());
*** 1286,1296 ****
}

oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                              oop obj,
                                              size_t obj_size) {
! guarantee(old_gen->level() == 1, "We only get here with an old generation");
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

  HeapWord* result = NULL;

  result = old_gen->expand_and_allocate(obj_size, false);
--- 1285,1295 ----
}

oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                              oop obj,
                                              size_t obj_size) {
! guarantee(old_gen == _old_gen, "We only get here with an old generation");
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

  HeapWord* result = NULL;

  result = old_gen->expand_and_allocate(obj_size, false);