src/share/vm/memory/genCollectedHeap.cpp
*** old/src/share/vm/memory/genCollectedHeap.cpp	Fri Oct 17 16:10:02 2014
--- new/src/share/vm/memory/genCollectedHeap.cpp	Fri Oct 17 16:10:02 2014

*** 116,130 ****
--- 116,130 ----
    set_barrier_set(rem_set()->bs());

    _gch = this;

    ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
-   _young_gen = gen_policy()->young_gen_spec()->init(young_rs, 0, rem_set());
+   _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
    heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());

    ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
-   _old_gen = gen_policy()->old_gen_spec()->init(old_rs, 1, rem_set());
+   _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
    heap_rs = heap_rs.last_part(gen_policy()->old_gen_spec()->max_size());
    clear_incremental_collection_failed();

  #if INCLUDE_ALL_GCS
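Aside: this hunk carves one contiguous reservation into a young part and an old part. first_part() takes the young generation's maximum size off the front, last_part() re-points heap_rs at the remainder, and the old generation is then carved the same way. A minimal standalone sketch of that splitting pattern (not HotSpot's ReservedSpace; the Reservation type here is illustrative):

    #include <cassert>
    #include <cstddef>

    // Illustrative stand-in for ReservedSpace: a [base, base + size) range
    // that can be split into a leading part and the remainder.
    struct Reservation {
      char*  base;
      size_t size;

      Reservation first_part(size_t n) const {
        assert(n <= size);
        return Reservation{base, n};
      }
      Reservation last_part(size_t n) const {
        assert(n <= size);
        return Reservation{base + n, size - n};
      }
    };

    int main() {
      char backing[1024];
      Reservation heap_rs{backing, sizeof(backing)};

      // Same shape as the hunk above: young off the front, old from the rest.
      size_t young_max = 256;
      Reservation young_rs = heap_rs.first_part(young_max);
      heap_rs = heap_rs.last_part(young_max);
      Reservation old_rs = heap_rs;  // old generation gets the remainder

      assert(young_rs.size == 256 && old_rs.size == 768);
      assert(old_rs.base == young_rs.base + young_rs.size);
      return 0;
    }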
*** 204,219 ****
--- 204,215 ----
  size_t GenCollectedHeap::used() const {
    return _young_gen->used() + _old_gen->used();
  }

- // Save the "used_region" for generations level and lower.
- void GenCollectedHeap::save_used_regions(int level) {
-   assert(level < _gen_policy->number_of_generations(), "Illegal level parameter");
-   if (level == 1) {
-     _old_gen->save_used_region();
-   }
+ void GenCollectedHeap::save_used_regions() {
+   _old_gen->save_used_region();
    _young_gen->save_used_region();
  }

  size_t GenCollectedHeap::max_capacity() const {
    return _young_gen->max_capacity() + _old_gen->max_capacity();
*** 331,342 ****
--- 327,346 ----
    // a previous collection will do mangling and will
    // change top of some spaces.
    record_gen_tops_before_GC();

    if (PrintGC && Verbose) {
+     // I didn't want to change the logging when removing the level concept,
+     // but I guess this logging could say young/old or something instead of 0/1.
+     int level;
+     if (gen == GenCollectedHeap::heap()->young_gen()) {
+       level = 0;
+     } else {
+       level = 1;
+     }
      gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
!                         gen->level(), gen->stat_record()->invocations, size * HeapWordSize);
!                         level, gen->stat_record()->invocations, size * HeapWordSize);
    }

    if (run_verification && VerifyBeforeGC) {
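Aside: the new comment in this hunk suggests the log line could say young/old instead of 0/1. A standalone sketch of what that might look like (illustrative only; GenType and gen_name are not HotSpot names):

    #include <cstdio>

    // Illustrative only: an enum plus a name helper lets the log print
    // "young"/"old" rather than a numeric level.
    enum class GenType { Young, Old };

    static const char* gen_name(GenType t) {
      return t == GenType::Young ? "young" : "old";
    }

    int main() {
      GenType t = GenType::Young;
      int invocations = 1;
      size_t bytes = 4096;
      std::printf("gen=%s invoke=%d size=%zu\n", gen_name(t), invocations, bytes);
      return 0;
    }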
*** 394,404 ****
--- 398,408 ----
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    gen->stat_record()->accumulated_time.stop();

-   update_gc_stats(gen->level(), full);
+   update_gc_stats(gen, full);

    if (run_verification && VerifyAfterGC) {
      HandleMark hm;  // Discard invalid handles created during verification
      Universe::verify(" VerifyAfterGC:");
    }
*** 411,432 ****
--- 415,435 ----
  void GenCollectedHeap::do_collection(bool   full,
                                       bool   clear_all_soft_refs,
                                       size_t size,
                                       bool   is_tlab,
!                                      int    max_level) {
!                                      Generation::Type max_generation) {
    ResourceMark rm;
    DEBUG_ONLY(Thread* my_thread = Thread::current();)

    assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
    assert(my_thread->is_VM_thread() ||
           my_thread->is_ConcurrentGC_thread(),
           "incorrect thread type capability");
    assert(Heap_lock->is_locked(),
           "the requesting thread should have the Heap_lock");
    guarantee(!is_gc_active(), "collection is not reentrant");
-   assert(max_level < n_gens(), "sanity check");

    if (GC_locker::check_active_before_gc()) {
      return; // GC is disabled (e.g. JNI GetXXXCritical operation)
    }
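Aside: note that the range check assert(max_level < n_gens(), "sanity check") disappears along with the int parameter. A two-value enum cannot encode an out-of-range level, so the type system now does that sanity check. A standalone sketch of the difference (illustrative names, not HotSpot code):

    // Illustrative only. With an int, every caller can pass a nonsense level
    // and the callee must assert; with a closed enum, the compiler rejects it.
    enum class MaxGeneration { Young, Old };

    static bool is_complete_collection(bool full, MaxGeneration max_gen) {
      // Mirrors "complete = full && (max_generation == Generation::Old)".
      return full && max_gen == MaxGeneration::Old;
    }

    int main() {
      return is_complete_collection(true, MaxGeneration::Old) ? 0 : 1;
      // is_complete_collection(true, 7) would not compile, unlike the int form.
    }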
*** 440,450 ****
--- 443,453 ----
    print_heap_before_gc();

    {
      FlagSetting fl(_is_gc_active, true);

!     bool complete = full && (max_level == (n_gens()-1));
!     bool complete = full && (max_generation == Generation::Old);
      const char* gc_cause_prefix = complete ? "Full GC" : "GC";
      gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
      TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
      // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
      // so we can assume here that the next GC id is what we want.
*** 453,462 ****
--- 456,466 ----
      gc_prologue(complete);
      increment_total_collections(complete);

      size_t gch_prev_used = used();
      bool must_restore_marks_for_biased_locking = false;
+     bool old_collected = false;

      bool run_verification = total_collections() >= VerifyGCStartAt;
      if (_young_gen->performs_in_place_marking() ||
          _old_gen->performs_in_place_marking()) {
        // We want to avoid doing this for
*** 464,483 ****
--- 468,486 ----
        must_restore_marks_for_biased_locking = true;
        BiasedLocking::preserve_marks();
      }

      bool prepared_for_verification = false;
-     int max_level_collected = 0;
      if (!(full && _old_gen->full_collects_younger_generations()) &&
          _young_gen->should_collect(full, size, is_tlab)) {
        if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
          prepare_for_verify();
          prepared_for_verification = true;
        }
        collect_generation(_young_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 0, do_clear_all_soft_refs);
      }

!     if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) {
!     if (max_generation == Generation::Old && _old_gen->should_collect(full, size, is_tlab)) {
        if (!complete) {
          // The full_collections increment was missed above.
          increment_total_full_collections();
        }

        pre_full_gc_dump(NULL);    // do any pre full gc dumps
*** 485,501 ****
--- 488,504 ----
        if (!prepared_for_verification) {
          prepare_for_verify();
        }
      }

      collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs);
!       max_level_collected = 1;
!       old_collected = true;

      // Update "complete" boolean wrt what actually transpired --
      // for instance, a promotion failure could have led to
      // a whole heap collection.
!     complete = complete || (max_level_collected == n_gens() - 1);
!     complete = complete || old_collected;

      if (complete) { // We did a "major" collection
        // FIXME: See comment at pre_full_gc_dump call
        post_full_gc_dump(NULL);   // do any post full gc dumps
      }
*** 508,518 ****
--- 511,521 ----
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    // Adjust generation sizes.
!   if (max_level_collected == 1) {
!   if (old_collected) {
      _old_gen->compute_new_size();
    }
    _young_gen->compute_new_size();

    if (complete) {
*** 549,559 ****
--- 552,562 ----
    SharedHeap::set_par_threads(t);
    _gen_process_roots_tasks->set_n_threads(t);
  }

  void GenCollectedHeap::
! gen_process_roots(int level,
! gen_process_roots(Generation::Type type,
                    bool younger_gens_as_roots,
                    bool activate_scope,
                    SharedHeap::ScanningOption so,
                    OopsInGenClosure* not_older_gens,
                    OopsInGenClosure* weak_roots,
*** 568,597 ****
--- 571,600 ----
                     cld_closure, weak_cld_closure,
                     code_closure);

    if (younger_gens_as_roots) {
      if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
!       if (level == 1) {
!       if (type == Generation::Old) {
          not_older_gens->set_generation(_young_gen);
          _young_gen->oop_iterate(not_older_gens);
        }
        not_older_gens->reset_generation();
      }
    }
    // When collection is parallel, all threads get to cooperate to do
! // older-gen scanning.
!   if (level == 0) {
! // old generation scanning.
!   if (type == Generation::Young) {
      older_gens->set_generation(_old_gen);
      rem_set()->younger_refs_iterate(_old_gen, older_gens);
      older_gens->reset_generation();
    }

    _gen_process_roots_tasks->all_tasks_completed();
  }

  void GenCollectedHeap::
! gen_process_roots(int level,
! gen_process_roots(Generation::Type type,
                    bool younger_gens_as_roots,
                    bool activate_scope,
                    SharedHeap::ScanningOption so,
                    bool only_strong_roots,
                    OopsInGenClosure* not_older_gens,
*** 599,617 ****
--- 602,620 ----
                    CLDClosure* cld_closure) {

    const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;

    bool is_moving_collection = false;
!   if (level == 0 || is_adjust_phase) {
!   if (type == Generation::Young || is_adjust_phase) {
      // young collections are always moving
      is_moving_collection = true;
    }

    MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
    CodeBlobClosure* code_closure = &mark_code_closure;

!   gen_process_roots(level,
!   gen_process_roots(type,
                      younger_gens_as_roots,
                      activate_scope, so,
                      not_older_gens, only_strong_roots ? NULL : not_older_gens,
                      older_gens,
                      cld_closure, only_strong_roots ? NULL : cld_closure,
*** 626,639 ****
--- 629,642 ----
    _old_gen->ref_processor()->weak_oops_do(root_closure);
  }

  #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
  void GenCollectedHeap::                                                 \
! oop_since_save_marks_iterate(int level,                                 \
! oop_since_save_marks_iterate(Generation::Type gen,                      \
                               OopClosureType* cur,                       \
                               OopClosureType* older) {                   \
!   if (level == 0) {                                                     \
!   if (gen == Generation::Young) {                                       \
      _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
      _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
    } else {                                                              \
      _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
    }                                                                     \
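Aside: GCH_SINCE_SAVE_MARKS_ITERATE_DEFN is stamped out once per closure type by ALL_SINCE_SAVE_MARKS_CLOSURES, with ##nv_suffix pasting in the statically bound (non-virtual) iterator name. A standalone sketch of the token-pasting pattern (illustrative types, not the HotSpot closures):

    #include <cstdio>

    // Illustrative only: one macro definition generates an overload per
    // "closure" type, and iterate##nv_suffix selects the _nv variant by name.
    struct DemoGen {
      void iterate_nv(int* cur) { std::printf("nv: %d\n", *cur); }
    };

    #define DEMO_ITERATE_DEFN(ClosureType, nv_suffix)                 \
      void since_save_marks_iterate(DemoGen& g, ClosureType* cur) {   \
        g.iterate##nv_suffix(cur);                                    \
      }

    DEMO_ITERATE_DEFN(int, _nv)  // expands to a function calling iterate_nv

    int main() {
      DemoGen g;
      int v = 42;
      since_save_marks_iterate(g, &v);  // prints "nv: 42"
      return 0;
    }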
*** 641,656 ****
--- 644,656 ----
  ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

  #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

! bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
!   if (level == 0) {
!     if (!_young_gen->no_allocs_since_save_marks()) return false;
!   }
!   if (!_old_gen->no_allocs_since_save_marks()) return false;
!   return true;
! bool GenCollectedHeap::no_allocs_since_save_marks(bool include_young) {
!   return (!include_young || _young_gen->no_allocs_since_save_marks()) &&
!          _old_gen->no_allocs_since_save_marks();
  }

  bool GenCollectedHeap::supports_inline_contig_alloc() const {
    return _young_gen->supports_inline_contig_alloc();
  }
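Aside: folding the level-based early returns into one boolean expression is easy to get wrong (&& binds tighter than ||), so the rewrite above parenthesizes the include_young case. A standalone check that the boolean form matches the old level-based logic over all inputs (illustrative, not HotSpot code):

    #include <cassert>

    // Old shape: level 0 checks young and old, level 1 checks only old.
    static bool level_style(int level, bool young_ok, bool old_ok) {
      if (level == 0 && !young_ok) return false;
      return old_ok;
    }

    // New shape, as in the hunk above.
    static bool bool_style(bool include_young, bool young_ok, bool old_ok) {
      return (!include_young || young_ok) && old_ok;
    }

    int main() {
      for (int y = 0; y <= 1; ++y) {
        for (int o = 0; o <= 1; ++o) {
          assert(level_style(0, y, o) == bool_style(true,  y, o));
          assert(level_style(1, y, o) == bool_style(false, y, o));
        }
      }
      return 0;
    }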
*** 673,723 ****
--- 673,723 ----
  #else  // INCLUDE_ALL_GCS
      ShouldNotReachHere();
  #endif // INCLUDE_ALL_GCS
    } else if (cause == GCCause::_wb_young_gc) {
      // minor collection for WhiteBox API
!     collect(cause, 0);
!     collect(cause, Generation::Young);
    } else {
  #ifdef ASSERT
      if (cause == GCCause::_scavenge_alot) {
        // minor collection only
!       collect(cause, 0);
!       collect(cause, Generation::Young);
      } else {
        // Stop-the-world full collection
!       collect(cause, n_gens() - 1);
!       collect(cause, Generation::Old);
      }
  #else
      // Stop-the-world full collection
!     collect(cause, n_gens() - 1);
!     collect(cause, Generation::Old);
  #endif
    }
  }

! void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
! void GenCollectedHeap::collect(GCCause::Cause cause, Generation::Type max_gen) {
    // The caller doesn't have the Heap_lock
    assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
    MutexLocker ml(Heap_lock);
!   collect_locked(cause, max_level);
!   collect_locked(cause, max_gen);
  }

  void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
    // The caller has the Heap_lock
    assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
!   collect_locked(cause, n_gens() - 1);
!   collect_locked(cause, Generation::Old);
  }

  // this is the private collection interface
  // The Heap_lock is expected to be held on entry.
! void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
! void GenCollectedHeap::collect_locked(GCCause::Cause cause, Generation::Type max_generation) {
    // Read the GC count while holding the Heap_lock
    unsigned int gc_count_before      = total_collections();
    unsigned int full_gc_count_before = total_full_collections();
    {
      MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
      VM_GenCollectFull op(gc_count_before, full_gc_count_before,
!                          cause, max_level);
!                          cause, max_generation);
      VMThread::execute(&op);
    }
  }

  #if INCLUDE_ALL_GCS
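Aside: collect_locked() reads the collection counters under the Heap_lock, then uses MutexUnlocker to release the lock for the duration of the VM operation and reacquire it on scope exit, the inverse of a scoped locker. A minimal standalone sketch of that inverse-RAII pattern (std::mutex stands in for the VM mutex; not HotSpot code):

    #include <mutex>

    // Illustrative only: releases an already-held lock for the lifetime of
    // the scope and reacquires it in the destructor, mirroring MutexUnlocker.
    class ScopedUnlocker {
      std::mutex& _m;
     public:
      explicit ScopedUnlocker(std::mutex& m) : _m(m) { _m.unlock(); }
      ~ScopedUnlocker() { _m.lock(); }
    };

    int main() {
      std::mutex heap_lock;
      heap_lock.lock();                // caller holds it, as in collect_locked()
      {
        ScopedUnlocker mu(heap_lock);  // give up the lock, "VM op" runs here
      }                                // lock reacquired on scope exit
      heap_lock.unlock();
      return 0;
    }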
*** 756,798 ****
--- 756,798 ----
    }
  }
  #endif // INCLUDE_ALL_GCS

  void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
!   do_full_collection(clear_all_soft_refs, _gen_policy->number_of_generations() - 1);
!   do_full_collection(clear_all_soft_refs, Generation::Old);
  }

  void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
!                                           int max_level) {
!                                           Generation::Type max_gen) {
!   int local_max_level;
!   Generation::Type local_max_gen;
    if (!incremental_collection_will_fail(false /* don't consult_young */) &&
        gc_cause() == GCCause::_gc_locker) {
!     local_max_level = 0;
!     local_max_gen = Generation::Young;
    } else {
!     local_max_level = max_level;
!     local_max_gen = max_gen;
    }

    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
!                 local_max_level      /* max_level */);
!                 local_max_gen        /* max_gen */);

    // Hack XXX FIX ME !!!
    // A scavenge may not have been attempted, or may have
    // been attempted and failed, because the old gen was too full
!   if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
!   if (local_max_gen == Generation::Young && gc_cause() == GCCause::_gc_locker &&
        incremental_collection_will_fail(false /* don't consult_young */)) {
      if (PrintGCDetails) {
        gclog_or_tty->print_cr("GC locker: Trying a full collection "
                               "because scavenge failed");
      }
      // This time allow the old gen to be collected as well
      do_collection(true                 /* full */,
                    clear_all_soft_refs  /* clear_all_soft_refs */,
                    0                    /* size */,
                    false                /* is_tlab */,
!                   n_gens() - 1         /* max_level */);
!                   Generation::Old      /* max_gen */);
    }
  }

  bool GenCollectedHeap::is_in_young(oop p) {
    bool result = ((HeapWord*)p) < _old_gen->reserved().start();
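Aside: the control flow in do_full_collection() is: prefer a young-only collection when the GC locker triggered the request and an incremental collection can still succeed, then widen to a full collection if the scavenge turned out to be insufficient. A compressed standalone sketch of that policy (illustrative names, not HotSpot code):

    enum class MaxGen { Young, Old };

    // Illustrative only: pick the initial scope, and widen to Old on retry
    // when a young-only attempt cannot help (mirrors the hunk above).
    static MaxGen initial_scope(bool gc_locker_cause, bool incremental_will_fail,
                                MaxGen requested) {
      if (!incremental_will_fail && gc_locker_cause) return MaxGen::Young;
      return requested;
    }

    static bool needs_full_retry(MaxGen attempted, bool gc_locker_cause,
                                 bool incremental_will_fail) {
      return attempted == MaxGen::Young && gc_locker_cause && incremental_will_fail;
    }

    int main() {
      MaxGen first = initial_scope(true, false, MaxGen::Old);  // Young first
      bool retry = needs_full_retry(first, true, true);        // widen to Old
      return (first == MaxGen::Young && retry) ? 0 : 1;
    }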
*** 1196,1206 ****
--- 1196,1206 ----
  }

  oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                                oop obj,
                                                size_t obj_size) {
!   guarantee(old_gen->level() == 1, "We only get here with an old generation");
!   guarantee(old_gen == _old_gen, "We only get here with an old generation");
    assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
    HeapWord* result = NULL;

    result = old_gen->expand_and_allocate(obj_size, false);
