src/share/vm/memory/genCollectedHeap.cpp

rev 7211 : [mq]: remove_ngen
rev 7212 : [mq]: remove_get_gen
rev 7213 : imported patch move_genspecs
rev 7214 : imported patch remove_n_gen
rev 7215 : imported patch remove_levels
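
The patch queue above removes the generation-level concept from GenCollectedHeap: the level-indexed _gens[] array and the _n_gens counter give way to explicit _young_gen and _old_gen fields. The sketch below is not part of the webrev; it is a self-contained C++ model (toy types and assumed names, not HotSpot classes) of the before/after shape of that refactoring.

    #include <cstddef>

    // Simplified stand-in for HotSpot's Generation; only what the sketch needs.
    struct Generation {
      enum Type { Young, Old };   // replaces the old int "level"
      size_t _capacity;
      size_t capacity() const { return _capacity; }
    };

    // Before: a heap indexed by level, looping over _gens[0.._n_gens).
    struct LevelBasedHeap {
      static const int _n_gens = 2;
      Generation* _gens[_n_gens];
      size_t capacity() const {
        size_t res = 0;
        for (int i = 0; i < _n_gens; i++) {
          res += _gens[i]->capacity();
        }
        return res;
      }
    };

    // After: exactly two named generations, no levels and no loops.
    struct TwoGenHeap {
      Generation* _young_gen;
      Generation* _old_gen;
      size_t capacity() const {
        return _young_gen->capacity() + _old_gen->capacity();
      }
    };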

*** 83,112 ****
  }
  
  jint GenCollectedHeap::initialize() {
    CollectedHeap::pre_initialize();
  
-   int i;
-   _n_gens = gen_policy()->number_of_generations();
- 
    // While there are no constraints in the GC code that HeapWordSize
    // be any particular value, there are multiple other areas in the
    // system which believe this to be true (e.g. oop->object_size in some
    // cases incorrectly returns the size in wordSize units rather than
    // HeapWordSize).
    guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
  
-   // The heap must be at least as aligned as generations.
-   size_t gen_alignment = Generation::GenGrain;
- 
-   _gen_specs = gen_policy()->generations();
- 
-   // Make sure the sizes are all aligned.
-   for (i = 0; i < _n_gens; i++) {
-     _gen_specs[i]->align(gen_alignment);
-   }
- 
    // Allocate space for the heap.
    char* heap_address;
    size_t total_reserved = 0;
    int n_covered_regions = 0;
--- 83,99 ----
*** 128,142 ****
    _rem_set = collector_policy()->create_rem_set(reserved_region(), n_covered_regions);
    set_barrier_set(rem_set()->bs());
  
    _gch = this;
  
!   for (i = 0; i < _n_gens; i++) {
!     ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
!     _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
!     heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
!   }
    clear_incremental_collection_failed();
  
  #if INCLUDE_ALL_GCS
    // If we are running CMS, create the collector responsible
    // for collecting the CMS generations.
--- 115,132 ----
    _rem_set = collector_policy()->create_rem_set(reserved_region(), n_covered_regions);
    set_barrier_set(rem_set()->bs());
  
    _gch = this;
  
!   ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false);
!   _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set());
!   heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size());
! 
!   ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false);
!   _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
!   heap_rs = heap_rs.last_part(gen_policy()->old_gen_spec()->max_size());
! 
    clear_incremental_collection_failed();
  
  #if INCLUDE_ALL_GCS
    // If we are running CMS, create the collector responsible
    // for collecting the CMS generations.
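
The initialize() hunk above carves the single reserved heap range into a young part followed by an old part via first_part()/last_part(). A minimal standalone illustration of that carving pattern, using a toy Range type instead of HotSpot's ReservedSpace (all names and sizes here are illustrative assumptions):

    #include <cassert>
    #include <cstddef>

    // Toy stand-in for ReservedSpace: a [base, base + size) range of address space.
    struct Range {
      char*  base;
      size_t size;
      // First 'bytes' of the range.
      Range first_part(size_t bytes) const { assert(bytes <= size); return Range{base, bytes}; }
      // Everything after the first 'bytes'.
      Range last_part(size_t bytes) const { assert(bytes <= size); return Range{base + bytes, size - bytes}; }
    };

    int main() {
      char backing[1024];
      Range heap{backing, sizeof(backing)};

      const size_t young_max = 256;
      const size_t old_max   = 768;

      // Young generation gets the low part of the reservation...
      Range young_rs = heap.first_part(young_max);
      heap = heap.last_part(young_max);

      // ...and the old generation gets what follows it.
      Range old_rs = heap.first_part(old_max);

      assert(young_rs.base + young_rs.size == old_rs.base); // generations are adjacent
      return 0;
    }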
*** 147,183 ****
  #endif // INCLUDE_ALL_GCS
  
    return JNI_OK;
  }
  
- 
  char* GenCollectedHeap::allocate(size_t alignment,
                                   size_t* _total_reserved,
                                   int* _n_covered_regions,
                                   ReservedSpace* heap_rs){
    const char overflow_msg[] = "The size of the object heap + VM data exceeds "
                                "the maximum representable size";
  
    // Now figure out the total size.
!   size_t total_reserved = 0;
!   int n_covered_regions = 0;
!   const size_t pageSize = UseLargePages ?
!       os::large_page_size() : os::vm_page_size();
! 
    assert(alignment % pageSize == 0, "Must be");
  
!   for (int i = 0; i < _n_gens; i++) {
!     total_reserved += _gen_specs[i]->max_size();
!     if (total_reserved < _gen_specs[i]->max_size()) {
        vm_exit_during_initialization(overflow_msg);
      }
-     n_covered_regions += _gen_specs[i]->n_covered_regions();
-   }
    assert(total_reserved % alignment == 0, err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
           SIZE_FORMAT, total_reserved, alignment));
  
    // Needed until the cardtable is fixed to have the right number
    // of covered regions.
    n_covered_regions += 2;
  
    *_total_reserved = total_reserved;
--- 137,169 ----
  #endif // INCLUDE_ALL_GCS
  
    return JNI_OK;
  }
  
  char* GenCollectedHeap::allocate(size_t alignment,
                                   size_t* _total_reserved,
                                   int* _n_covered_regions,
                                   ReservedSpace* heap_rs){
    const char overflow_msg[] = "The size of the object heap + VM data exceeds "
                                "the maximum representable size";
  
    // Now figure out the total size.
!   const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
    assert(alignment % pageSize == 0, "Must be");
  
!   size_t total_reserved = gen_policy()->young_gen_spec()->max_size() +
!                           gen_policy()->old_gen_spec()->max_size();
!   if (total_reserved < gen_policy()->young_gen_spec()->max_size() ||
!       total_reserved < gen_policy()->old_gen_spec()->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
  
    assert(total_reserved % alignment == 0, err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
           SIZE_FORMAT, total_reserved, alignment));
  
+   int n_covered_regions = 2; // Young + Old
+ 
    // Needed until the cardtable is fixed to have the right number
    // of covered regions.
    n_covered_regions += 2;
  
    *_total_reserved = total_reserved;
*** 185,252 ****
    *heap_rs = Universe::reserve_heap(total_reserved, alignment);
  
    return heap_rs->base();
  }
  
- 
  void GenCollectedHeap::post_initialize() {
    SharedHeap::post_initialize();
    GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
    guarantee(policy->is_generation_policy(), "Illegal policy type");
!   DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
    assert(def_new_gen->kind() == Generation::DefNew ||
           def_new_gen->kind() == Generation::ParNew,
           "Wrong generation kind");
  
!   Generation* old_gen = get_gen(1);
!   assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
!          old_gen->kind() == Generation::MarkSweepCompact,
           "Wrong generation kind");
  
    policy->initialize_size_policy(def_new_gen->eden()->capacity(),
!                                  old_gen->capacity(),
                                   def_new_gen->from()->capacity());
    policy->initialize_gc_policy_counters();
  }
  
  void GenCollectedHeap::ref_processing_init() {
    SharedHeap::ref_processing_init();
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->ref_processor_init();
!   }
  }
  
  size_t GenCollectedHeap::capacity() const {
!   size_t res = 0;
!   for (int i = 0; i < _n_gens; i++) {
!     res += _gens[i]->capacity();
!   }
!   return res;
  }
  
  size_t GenCollectedHeap::used() const {
!   size_t res = 0;
!   for (int i = 0; i < _n_gens; i++) {
!     res += _gens[i]->used();
!   }
!   return res;
  }
  
! // Save the "used_region" for generations level and lower.
! void GenCollectedHeap::save_used_regions(int level) {
!   assert(level < _n_gens, "Illegal level parameter");
!   for (int i = level; i >= 0; i--) {
!     _gens[i]->save_used_region();
!   }
  }
  
  size_t GenCollectedHeap::max_capacity() const {
!   size_t res = 0;
!   for (int i = 0; i < _n_gens; i++) {
!     res += _gens[i]->max_capacity();
!   }
!   return res;
  }
  
  // Update the _full_collections_completed counter
  // at the end of a stop-world full GC.
  unsigned int GenCollectedHeap::update_full_collections_completed() {
--- 171,220 ----
    *heap_rs = Universe::reserve_heap(total_reserved, alignment);
  
    return heap_rs->base();
  }
  
  void GenCollectedHeap::post_initialize() {
    SharedHeap::post_initialize();
    GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
    guarantee(policy->is_generation_policy(), "Illegal policy type");
!   DefNewGeneration* def_new_gen = (DefNewGeneration*) _young_gen;
    assert(def_new_gen->kind() == Generation::DefNew ||
           def_new_gen->kind() == Generation::ParNew,
           "Wrong generation kind");
  
!   assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
!          _old_gen->kind() == Generation::MarkSweepCompact,
           "Wrong generation kind");
  
    policy->initialize_size_policy(def_new_gen->eden()->capacity(),
!                                  _old_gen->capacity(),
                                   def_new_gen->from()->capacity());
    policy->initialize_gc_policy_counters();
  }
  
  void GenCollectedHeap::ref_processing_init() {
    SharedHeap::ref_processing_init();
!   _young_gen->ref_processor_init();
!   _old_gen->ref_processor_init();
  }
  
  size_t GenCollectedHeap::capacity() const {
!   return _young_gen->capacity() + _old_gen->capacity();
  }
  
  size_t GenCollectedHeap::used() const {
!   return _young_gen->used() + _old_gen->used();
  }
  
! void GenCollectedHeap::save_used_regions() {
!   _old_gen->save_used_region();
!   _young_gen->save_used_region();
  }
  
  size_t GenCollectedHeap::max_capacity() const {
!   return _young_gen->max_capacity() + _old_gen->max_capacity();
  }
  
  // Update the _full_collections_completed counter
  // at the end of a stop-world full GC.
  unsigned int GenCollectedHeap::update_full_collections_completed() {
*** 306,325 ****
  #endif
  
  HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                                 bool is_tlab,
                                                 bool first_only) {
!   HeapWord* res;
!   for (int i = 0; i < _n_gens; i++) {
!     if (_gens[i]->should_allocate(size, is_tlab)) {
!       res = _gens[i]->allocate(size, is_tlab);
!       if (res != NULL) return res;
!       else if (first_only) break;
      }
    }
!   // Otherwise...
!   return NULL;
  }
  
  HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                           bool* gc_overhead_limit_was_exceeded) {
    return collector_policy()->mem_allocate_work(size,
--- 274,297 ----
  #endif
  
  HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                                 bool is_tlab,
                                                 bool first_only) {
!   HeapWord* res = NULL;
! 
!   if (_young_gen->should_allocate(size, is_tlab)) {
!     res = _young_gen->allocate(size, is_tlab);
!     if (res != NULL || first_only) {
!       return res;
      }
    }
! 
!   if (_old_gen->should_allocate(size, is_tlab)) {
!     res = _old_gen->allocate(size, is_tlab);
!   }
! 
!   return res;
  }
  
  HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                           bool* gc_overhead_limit_was_exceeded) {
    return collector_policy()->mem_allocate_work(size,
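
The rewritten attempt_allocation() tries the young generation first and only falls back to the old generation when the young attempt fails and first_only is not set. A compact, self-contained model of that control flow (toy Gen interface, not the HotSpot classes):

    #include <cstddef>

    struct Gen {
      virtual bool should_allocate(size_t size, bool is_tlab) const = 0;
      virtual void* allocate(size_t size, bool is_tlab) = 0;
      virtual ~Gen() {}
    };

    // Mirrors the rewritten attempt_allocation: young first, then old,
    // honoring first_only the same way the patch does.
    void* attempt_allocation(Gen* young, Gen* old, size_t size, bool is_tlab, bool first_only) {
      void* res = NULL;
      if (young->should_allocate(size, is_tlab)) {
        res = young->allocate(size, is_tlab);
        if (res != NULL || first_only) {
          return res;
        }
      }
      if (old->should_allocate(size, is_tlab)) {
        res = old->allocate(size, is_tlab);
      }
      return res;
    }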
*** 335,460 ****
    return UseConcMarkSweepGC &&
           ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
            (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
  }
  
! void GenCollectedHeap::do_collection(bool full,
!                                      bool clear_all_soft_refs,
!                                      size_t size,
!                                      bool is_tlab,
!                                      int max_level) {
!   bool prepared_for_verification = false;
!   ResourceMark rm;
!   DEBUG_ONLY(Thread* my_thread = Thread::current();)
! 
!   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
!   assert(my_thread->is_VM_thread() ||
!          my_thread->is_ConcurrentGC_thread(),
!          "incorrect thread type capability");
!   assert(Heap_lock->is_locked(),
!          "the requesting thread should have the Heap_lock");
!   guarantee(!is_gc_active(), "collection is not reentrant");
!   assert(max_level < n_gens(), "sanity check");
! 
!   if (GC_locker::check_active_before_gc()) {
!     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
!   }
! 
!   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
!                           collector_policy()->should_clear_all_soft_refs();
! 
!   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
! 
!   const size_t metadata_prev_used = MetaspaceAux::used_bytes();
! 
!   print_heap_before_gc();
! 
!   {
!     FlagSetting fl(_is_gc_active, true);
! 
!     bool complete = full && (max_level == (n_gens()-1));
!     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
!     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
!     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
!     // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
!     // so we can assume here that the next GC id is what we want.
!     GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
! 
!     gc_prologue(complete);
!     increment_total_collections(complete);
! 
!     size_t gch_prev_used = used();
! 
!     int starting_level = 0;
!     if (full) {
!       // Search for the oldest generation which will collect all younger
!       // generations, and start collection loop there.
!       for (int i = max_level; i >= 0; i--) {
!         if (_gens[i]->full_collects_younger_generations()) {
!           starting_level = i;
!           break;
!         }
!       }
!     }
! 
!     bool must_restore_marks_for_biased_locking = false;
! 
!     int max_level_collected = starting_level;
!     for (int i = starting_level; i <= max_level; i++) {
!       if (_gens[i]->should_collect(full, size, is_tlab)) {
!         if (i == n_gens() - 1) {  // a major collection is to happen
!           if (!complete) {
!             // The full_collections increment was missed above.
!             increment_total_full_collections();
!           }
!           pre_full_gc_dump(NULL);    // do any pre full gc dumps
!         }
          // Timer for individual generations. Last argument is false: no CR
          // FIXME: We should try to start the timing earlier to cover more of the GC pause
          // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
          // so we can assume here that the next GC id is what we want.
!         GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek());
!         TraceCollectorStats tcs(_gens[i]->counters());
!         TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());
! 
!         size_t prev_used = _gens[i]->used();
!         _gens[i]->stat_record()->invocations++;
!         _gens[i]->stat_record()->accumulated_time.start();
  
          // Must be done anew before each collection because
          // a previous collection will do mangling and will
          // change top of some spaces.
          record_gen_tops_before_GC();
  
          if (PrintGC && Verbose) {
            gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
!                               i,
!                               _gens[i]->stat_record()->invocations,
!                               size*HeapWordSize);
          }
  
!         if (VerifyBeforeGC && i >= VerifyGCLevel &&
!             total_collections() >= VerifyGCStartAt) {
            HandleMark hm;  // Discard invalid handles created during verification
-           if (!prepared_for_verification) {
-             prepare_for_verify();
-             prepared_for_verification = true;
-           }
            Universe::verify(" VerifyBeforeGC:");
          }
          COMPILER2_PRESENT(DerivedPointerTable::clear());
  
-         if (!must_restore_marks_for_biased_locking &&
-             _gens[i]->performs_in_place_marking()) {
-           // We perform this mark word preservation work lazily
-           // because it's only at this point that we know whether we
-           // absolutely have to do it; we want to avoid doing it for
-           // scavenge-only collections where it's unnecessary
-           must_restore_marks_for_biased_locking = true;
-           BiasedLocking::preserve_marks();
-         }
- 
          // Do collection work
          {
            // Note on ref discovery: For what appear to be historical reasons,
            // GCH enables and disabled (by enqueing) refs discovery.
            // In the future this should be moved into the generation's
--- 307,356 ----
    return UseConcMarkSweepGC &&
           ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
            (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
  }
  
! void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
!                                           bool is_tlab, bool run_verification, bool clear_soft_refs) {
    // Timer for individual generations. Last argument is false: no CR
    // FIXME: We should try to start the timing earlier to cover more of the GC pause
    // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
    // so we can assume here that the next GC id is what we want.
!   GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
!   TraceCollectorStats tcs(gen->counters());
!   TraceMemoryManagerStats tmms(gen->kind(),gc_cause());
! 
!   size_t prev_used = gen->used();
!   gen->stat_record()->invocations++;
!   gen->stat_record()->accumulated_time.start();
  
    // Must be done anew before each collection because
    // a previous collection will do mangling and will
    // change top of some spaces.
    record_gen_tops_before_GC();
  
    if (PrintGC && Verbose) {
+     // I didn't want to change the logging when removing the level concept,
+     // but I guess this logging could say young/old or something instead of 0/1.
+     int level;
+     if (gen == GenCollectedHeap::heap()->young_gen()) {
+       level = 0;
+     } else {
+       level = 1;
+     }
      gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
!                         level,
!                         gen->stat_record()->invocations,
!                         size * HeapWordSize);
    }
  
!   if (run_verification && VerifyBeforeGC) {
      HandleMark hm;  // Discard invalid handles created during verification
      Universe::verify(" VerifyBeforeGC:");
    }
    COMPILER2_PRESENT(DerivedPointerTable::clear());
  
    // Do collection work
    {
      // Note on ref discovery: For what appear to be historical reasons,
      // GCH enables and disabled (by enqueing) refs discovery.
      // In the future this should be moved into the generation's
*** 470,531 ****
        // We want to discover references, but not process them yet.
        // This mode is disabled in process_discovered_references if the
        // generation does some collection work, or in
        // enqueue_discovered_references if the generation returns
        // without doing any work.
!       ReferenceProcessor* rp = _gens[i]->ref_processor();
        // If the discovery of ("weak") refs in this generation is
        // atomic wrt other collectors in this configuration, we
        // are guaranteed to have empty discovered ref lists.
        if (rp->discovery_is_atomic()) {
          rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
!         rp->setup_policy(do_clear_all_soft_refs);
        } else {
          // collect() below will enable discovery as appropriate
        }
!       _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
        if (!rp->enqueuing_is_done()) {
          rp->enqueue_discovered_references();
        } else {
          rp->set_enqueuing_is_done(false);
        }
        rp->verify_no_references_recorded();
      }
-     max_level_collected = i;
  
      // Determine if allocation request was met.
      if (size > 0) {
!       if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
!         if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
          size = 0;
        }
      }
    }
  
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  
!   _gens[i]->stat_record()->accumulated_time.stop();
  
!   update_gc_stats(i, full);
  
!   if (VerifyAfterGC && i >= VerifyGCLevel &&
!       total_collections() >= VerifyGCStartAt) {
      HandleMark hm;  // Discard invalid handles created during verification
      Universe::verify(" VerifyAfterGC:");
    }
  
    if (PrintGCDetails) {
      gclog_or_tty->print(":");
!     _gens[i]->print_heap_change(prev_used);
    }
    }
    }
  
    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
!   complete = complete || (max_level_collected == n_gens() - 1);
  
    if (complete) { // We did a "major" collection
      // FIXME: See comment at pre_full_gc_dump call
      post_full_gc_dump(NULL);   // do any post full gc dumps
    }
--- 366,504 ----
        // We want to discover references, but not process them yet.
        // This mode is disabled in process_discovered_references if the
        // generation does some collection work, or in
        // enqueue_discovered_references if the generation returns
        // without doing any work.
!       ReferenceProcessor* rp = gen->ref_processor();
        // If the discovery of ("weak") refs in this generation is
        // atomic wrt other collectors in this configuration, we
        // are guaranteed to have empty discovered ref lists.
        if (rp->discovery_is_atomic()) {
          rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
!         rp->setup_policy(clear_soft_refs);
        } else {
          // collect() below will enable discovery as appropriate
        }
!       gen->collect(full, clear_soft_refs, size, is_tlab);
        if (!rp->enqueuing_is_done()) {
          rp->enqueue_discovered_references();
        } else {
          rp->set_enqueuing_is_done(false);
        }
        rp->verify_no_references_recorded();
      }
  
      // Determine if allocation request was met.
      if (size > 0) {
!       if (!is_tlab || gen->supports_tlab_allocation()) {
!         if (size * HeapWordSize <= gen->unsafe_max_alloc_nogc()) {
          size = 0;
        }
      }
    }
  
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  
!   gen->stat_record()->accumulated_time.stop();
  
!   update_gc_stats(gen, full);
  
!   if (run_verification && VerifyAfterGC) {
      HandleMark hm;  // Discard invalid handles created during verification
      Universe::verify(" VerifyAfterGC:");
    }
  
    if (PrintGCDetails) {
      gclog_or_tty->print(":");
!     gen->print_heap_change(prev_used);
    }
+ }
+ 
+ void GenCollectedHeap::do_collection(bool full,
+                                      bool clear_all_soft_refs,
+                                      size_t size,
+                                      bool is_tlab,
+                                      Generation::Type max_generation) {
+   ResourceMark rm;
+   DEBUG_ONLY(Thread* my_thread = Thread::current();)
+ 
+   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
+   assert(my_thread->is_VM_thread() ||
+          my_thread->is_ConcurrentGC_thread(),
+          "incorrect thread type capability");
+   assert(Heap_lock->is_locked(),
+          "the requesting thread should have the Heap_lock");
+   guarantee(!is_gc_active(), "collection is not reentrant");
+ 
+   if (GC_locker::check_active_before_gc()) {
+     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
+   }
+ 
+   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
+                           collector_policy()->should_clear_all_soft_refs();
+ 
+   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
+ 
+   const size_t metadata_prev_used = MetaspaceAux::used_bytes();
+ 
+   print_heap_before_gc();
+ 
+   {
+     FlagSetting fl(_is_gc_active, true);
+ 
+     bool complete = full && (max_generation == Generation::Old);
+     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
+     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
+     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+     // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
+     // so we can assume here that the next GC id is what we want.
+     GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
+ 
+     gc_prologue(complete);
+     increment_total_collections(complete);
+ 
+     size_t gch_prev_used = used();
+     bool must_restore_marks_for_biased_locking = false;
+     bool old_collected = false;
+     bool run_verification = total_collections() >= VerifyGCStartAt;
+ 
+     if (_young_gen->performs_in_place_marking() ||
+         _old_gen->performs_in_place_marking()) {
+       // We want to avoid doing this for
+       // scavenge-only collections where it's unnecessary.
+       must_restore_marks_for_biased_locking = true;
+       BiasedLocking::preserve_marks();
+     }
+ 
+     bool prepared_for_verification = false;
+     if (!(full && _old_gen->full_collects_younger_generations()) &&
+         _young_gen->should_collect(full, size, is_tlab)) {
+       if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
+         prepare_for_verify();
+         prepared_for_verification = true;
+       }
+       collect_generation(_young_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 0, do_clear_all_soft_refs);
+     }
+     if (max_generation == Generation::Old && _old_gen->should_collect(full, size, is_tlab)) {
+       if (!complete) {
+         // The full_collections increment was missed above.
+         increment_total_full_collections();
+       }
+       pre_full_gc_dump(NULL);    // do any pre full gc dumps
+       if (run_verification && VerifyGCLevel <= 1 && VerifyBeforeGC) {
+         if (!prepared_for_verification) {
+           prepare_for_verify();
        }
      }
+       collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs);
+       old_collected = true;
+     }
  
    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
!   complete = complete || old_collected;
  
    if (complete) { // We did a "major" collection
      // FIXME: See comment at pre_full_gc_dump call
      post_full_gc_dump(NULL);   // do any post full gc dumps
    }
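
In the hunk above, the body of the old per-level loop becomes the collect_generation() helper, and the level-based verification guard i >= VerifyGCLevel is respelled as VerifyGCLevel <= 0 for the young generation and VerifyGCLevel <= 1 for the old one. The following standalone sketch (plain functions, not the HotSpot code) checks that the two spellings agree for the only two levels that existed:

    #include <cassert>

    // Old form: generation i is verified when i >= VerifyGCLevel.
    static bool verify_old_style(int i, int verify_gc_level) {
      return i >= verify_gc_level;
    }

    // New form: the level test is spelled per generation.
    static bool verify_young(int verify_gc_level) { return verify_gc_level <= 0; }
    static bool verify_old(int verify_gc_level)   { return verify_gc_level <= 1; }

    int main() {
      // The two spellings agree for levels 0 (young) and 1 (old).
      for (int lvl = -1; lvl <= 2; lvl++) {
        assert(verify_old_style(0, lvl) == verify_young(lvl));
        assert(verify_old_style(1, lvl) == verify_old(lvl));
      }
      return 0;
    }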
*** 537,550 ****
      if (complete) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }
  
-   for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
!     _gens[j]->compute_new_size();
    }
  
    if (complete) {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();
      MetaspaceAux::verify_metrics();
--- 510,524 ----
      if (complete) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }
  
    // Adjust generation sizes.
!   if (old_collected) {
!     _old_gen->compute_new_size();
    }
+   _young_gen->compute_new_size();
  
    if (complete) {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();
      MetaspaceAux::verify_metrics();
*** 578,588 ****
    SharedHeap::set_par_threads(t);
    _gen_process_roots_tasks->set_n_threads(t);
  }
  
  void GenCollectedHeap::
! gen_process_roots(int level,
                    bool younger_gens_as_roots,
                    bool activate_scope,
                    SharedHeap::ScanningOption so,
                    OopsInGenClosure* not_older_gens,
                    OopsInGenClosure* weak_roots,
--- 552,562 ----
    SharedHeap::set_par_threads(t);
    _gen_process_roots_tasks->set_n_threads(t);
  }
  
  void GenCollectedHeap::
! gen_process_roots(Generation::Type type,
                    bool younger_gens_as_roots,
                    bool activate_scope,
                    SharedHeap::ScanningOption so,
                    OopsInGenClosure* not_older_gens,
                    OopsInGenClosure* weak_roots,
*** 597,626 ****
                    cld_closure, weak_cld_closure,
                    code_closure);
  
    if (younger_gens_as_roots) {
      if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
!       for (int i = 0; i < level; i++) {
!         not_older_gens->set_generation(_gens[i]);
!         _gens[i]->oop_iterate(not_older_gens);
        }
        not_older_gens->reset_generation();
      }
    }
  
    // When collection is parallel, all threads get to cooperate to do
! // older-gen scanning.
!   for (int i = level+1; i < _n_gens; i++) {
!     older_gens->set_generation(_gens[i]);
!     rem_set()->younger_refs_iterate(_gens[i], older_gens);
      older_gens->reset_generation();
    }
  
    _gen_process_roots_tasks->all_tasks_completed();
  }
  
  void GenCollectedHeap::
! gen_process_roots(int level,
                    bool younger_gens_as_roots,
                    bool activate_scope,
                    SharedHeap::ScanningOption so,
                    bool only_strong_roots,
                    OopsInGenClosure* not_older_gens,
--- 571,600 ----
                    cld_closure, weak_cld_closure,
                    code_closure);
  
    if (younger_gens_as_roots) {
      if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
!       if (type == Generation::Old) {
!         not_older_gens->set_generation(_young_gen);
!         _young_gen->oop_iterate(not_older_gens);
        }
        not_older_gens->reset_generation();
      }
    }
  
    // When collection is parallel, all threads get to cooperate to do
! // old generation scanning.
!   if (type == Generation::Young) {
!     older_gens->set_generation(_old_gen);
!     rem_set()->younger_refs_iterate(_old_gen, older_gens);
      older_gens->reset_generation();
    }
  
    _gen_process_roots_tasks->all_tasks_completed();
  }
  
  void GenCollectedHeap::
! gen_process_roots(Generation::Type type,
                    bool younger_gens_as_roots,
                    bool activate_scope,
                    SharedHeap::ScanningOption so,
                    bool only_strong_roots,
                    OopsInGenClosure* not_older_gens,
*** 628,646 ****
                    CLDClosure* cld_closure) {
    const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
  
    bool is_moving_collection = false;
!   if (level == 0 || is_adjust_phase) {
      // young collections are always moving
      is_moving_collection = true;
    }
  
    MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
    CodeBlobClosure* code_closure = &mark_code_closure;
  
!   gen_process_roots(level,
                      younger_gens_as_roots,
                      activate_scope, so,
                      not_older_gens, only_strong_roots ? NULL : not_older_gens,
                      older_gens,
                      cld_closure, only_strong_roots ? NULL : cld_closure,
--- 602,620 ----
                    CLDClosure* cld_closure) {
    const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
  
    bool is_moving_collection = false;
!   if (type == Generation::Young || is_adjust_phase) {
      // young collections are always moving
      is_moving_collection = true;
    }
  
    MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
    CodeBlobClosure* code_closure = &mark_code_closure;
  
!   gen_process_roots(type,
                      younger_gens_as_roots,
                      activate_scope, so,
                      not_older_gens, only_strong_roots ? NULL : not_older_gens,
                      older_gens,
                      cld_closure, only_strong_roots ? NULL : cld_closure,
*** 649,695 ****
  }
  
  void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
    SharedHeap::process_weak_roots(root_closure);
    // "Local" "weak" refs
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->ref_processor()->weak_oops_do(root_closure);
!   }
  }
  
  #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
  void GenCollectedHeap::                                                 \
! oop_since_save_marks_iterate(int level,                                 \
                               OopClosureType* cur,                       \
                               OopClosureType* older) {                   \
!   _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
!   for (int i = level+1; i < n_gens(); i++) {                            \
!     _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
    }                                                                     \
  }
  
  ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
  
  #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
  
! bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
!   for (int i = level; i < _n_gens; i++) {
!     if (!_gens[i]->no_allocs_since_save_marks()) return false;
!   }
!   return true;
  }
  
  bool GenCollectedHeap::supports_inline_contig_alloc() const {
!   return _gens[0]->supports_inline_contig_alloc();
  }
  
  HeapWord** GenCollectedHeap::top_addr() const {
!   return _gens[0]->top_addr();
  }
  
  HeapWord** GenCollectedHeap::end_addr() const {
!   return _gens[0]->end_addr();
  }
  
  // public collection interfaces
  
  void GenCollectedHeap::collect(GCCause::Cause cause) {
--- 623,668 ----
  }
  
  void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
    SharedHeap::process_weak_roots(root_closure);
    // "Local" "weak" refs
!   _young_gen->ref_processor()->weak_oops_do(root_closure);
!   _old_gen->ref_processor()->weak_oops_do(root_closure);
  }
  
  #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
  void GenCollectedHeap::                                                 \
! oop_since_save_marks_iterate(Generation::Type gen,                      \
                               OopClosureType* cur,                       \
                               OopClosureType* older) {                   \
!   if (gen == Generation::Young) {                                       \
!     _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
!     _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
!   } else {                                                              \
!     _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
    }                                                                     \
  }
  
  ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
  
  #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
  
! bool GenCollectedHeap::no_allocs_since_save_marks(bool include_young) {
!   return include_young && _young_gen->no_allocs_since_save_marks() ||
!     _old_gen->no_allocs_since_save_marks();
  }
  
  bool GenCollectedHeap::supports_inline_contig_alloc() const {
!   return _young_gen->supports_inline_contig_alloc();
  }
  
  HeapWord** GenCollectedHeap::top_addr() const {
!   return _young_gen->top_addr();
  }
  
  HeapWord** GenCollectedHeap::end_addr() const {
!   return _young_gen->end_addr();
  }
  
  // public collection interfaces
  
  void GenCollectedHeap::collect(GCCause::Cause cause) {
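
Several of the root-processing and save-marks hunks replace the int level parameter and its index loops with a Generation::Type enum and explicit young/old branches. A simplified, self-contained sketch of that dispatch style (hypothetical closure type and names, not the HotSpot declarations):

    // Simplified model: dispatch on an enum instead of iterating levels.
    struct Closure {
      virtual void apply_to(const char* gen_name) = 0;
      virtual ~Closure() {}
    };

    enum GenType { Young, Old };

    // Mirrors the shape of oop_since_save_marks_iterate(Generation::Type, ...):
    // the named generation gets 'cur', anything older gets 'older'.
    void since_save_marks_iterate(GenType gen, Closure* cur, Closure* older) {
      if (gen == Young) {
        cur->apply_to("young");
        older->apply_to("old");
      } else {
        cur->apply_to("old");
      }
    }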
*** 700,763 ****
  #else // INCLUDE_ALL_GCS
      ShouldNotReachHere();
  #endif // INCLUDE_ALL_GCS
    } else if (cause == GCCause::_wb_young_gc) {
      // minor collection for WhiteBox API
!     collect(cause, 0);
    } else {
  #ifdef ASSERT
      if (cause == GCCause::_scavenge_alot) {
        // minor collection only
!       collect(cause, 0);
      } else {
        // Stop-the-world full collection
!       collect(cause, n_gens() - 1);
      }
  #else
      // Stop-the-world full collection
!     collect(cause, n_gens() - 1);
  #endif
    }
  }
  
! void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
    // The caller doesn't have the Heap_lock
    assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
    MutexLocker ml(Heap_lock);
!   collect_locked(cause, max_level);
  }
  
  void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
    // The caller has the Heap_lock
    assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
!   collect_locked(cause, n_gens() - 1);
  }
  
  // this is the private collection interface
  // The Heap_lock is expected to be held on entry.
  
! void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
    // Read the GC count while holding the Heap_lock
    unsigned int gc_count_before      = total_collections();
    unsigned int full_gc_count_before = total_full_collections();
    {
      MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
      VM_GenCollectFull op(gc_count_before, full_gc_count_before,
!                          cause, max_level);
      VMThread::execute(&op);
    }
  }
  
  #if INCLUDE_ALL_GCS
  bool GenCollectedHeap::create_cms_collector() {
  
!   assert(_gens[1]->kind() == Generation::ConcurrentMarkSweep,
           "Unexpected generation kinds");
    // Skip two header words in the block content verification
    NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
    CMSCollector* collector = new CMSCollector(
!     (ConcurrentMarkSweepGeneration*)_gens[1],
      _rem_set->as_CardTableRS(),
      (ConcurrentMarkSweepPolicy*) collector_policy());
  
    if (collector == NULL || !collector->completed_initialization()) {
      if (collector) {
--- 673,736 ----
  #else // INCLUDE_ALL_GCS
      ShouldNotReachHere();
  #endif // INCLUDE_ALL_GCS
    } else if (cause == GCCause::_wb_young_gc) {
      // minor collection for WhiteBox API
!     collect(cause, Generation::Young);
    } else {
  #ifdef ASSERT
      if (cause == GCCause::_scavenge_alot) {
        // minor collection only
!       collect(cause, Generation::Young);
      } else {
        // Stop-the-world full collection
!       collect(cause, Generation::Old);
      }
  #else
      // Stop-the-world full collection
!     collect(cause, Generation::Old);
  #endif
    }
  }
  
! void GenCollectedHeap::collect(GCCause::Cause cause, Generation::Type max_gen) {
    // The caller doesn't have the Heap_lock
    assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
    MutexLocker ml(Heap_lock);
!   collect_locked(cause, max_gen);
  }
  
  void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
    // The caller has the Heap_lock
    assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
!   collect_locked(cause, Generation::Old);
  }
  
  // this is the private collection interface
  // The Heap_lock is expected to be held on entry.
  
! void GenCollectedHeap::collect_locked(GCCause::Cause cause, Generation::Type max_generation) {
    // Read the GC count while holding the Heap_lock
    unsigned int gc_count_before      = total_collections();
    unsigned int full_gc_count_before = total_full_collections();
    {
      MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
      VM_GenCollectFull op(gc_count_before, full_gc_count_before,
!                          cause, max_generation);
      VMThread::execute(&op);
    }
  }
  
  #if INCLUDE_ALL_GCS
  bool GenCollectedHeap::create_cms_collector() {
  
!   assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
           "Unexpected generation kinds");
    // Skip two header words in the block content verification
    NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
    CMSCollector* collector = new CMSCollector(
!     (ConcurrentMarkSweepGeneration*)_old_gen,
      _rem_set->as_CardTableRS(),
      (ConcurrentMarkSweepPolicy*) collector_policy());
  
    if (collector == NULL || !collector->completed_initialization()) {
      if (collector) {
*** 783,831 ****
    }
  }
  #endif // INCLUDE_ALL_GCS
  
  void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
!   do_full_collection(clear_all_soft_refs, _n_gens - 1);
  }
  
  void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
!                                           int max_level) {
!   int local_max_level;
    if (!incremental_collection_will_fail(false /* don't consult_young */) &&
        gc_cause() == GCCause::_gc_locker) {
!     local_max_level = 0;
    } else {
!     local_max_level = max_level;
    }
  
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
!                 local_max_level      /* max_level */);
  
    // Hack XXX FIX ME !!!
    // A scavenge may not have been attempted, or may have
    // been attempted and failed, because the old gen was too full
!   if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
        incremental_collection_will_fail(false /* don't consult_young */)) {
      if (PrintGCDetails) {
        gclog_or_tty->print_cr("GC locker: Trying a full collection "
                               "because scavenge failed");
      }
      // This time allow the old gen to be collected as well
      do_collection(true                 /* full */,
                    clear_all_soft_refs  /* clear_all_soft_refs */,
                    0                    /* size */,
                    false                /* is_tlab */,
!                   n_gens() - 1         /* max_level */);
    }
  }
  
  bool GenCollectedHeap::is_in_young(oop p) {
!   bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
!   assert(result == _gens[0]->is_in_reserved(p),
           err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
    return result;
  }
  
  // Returns "TRUE" iff "p" points into the committed areas of the heap.
--- 756,804 ----
    }
  }
  #endif // INCLUDE_ALL_GCS
  
  void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
!   do_full_collection(clear_all_soft_refs, Generation::Old);
  }
  
  void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
!                                           Generation::Type max_gen) {
!   Generation::Type local_max_gen;
    if (!incremental_collection_will_fail(false /* don't consult_young */) &&
        gc_cause() == GCCause::_gc_locker) {
!     local_max_gen = Generation::Young;
    } else {
!     local_max_gen = max_gen;
    }
  
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
!                 local_max_gen        /* max_gen */);
  
    // Hack XXX FIX ME !!!
    // A scavenge may not have been attempted, or may have
    // been attempted and failed, because the old gen was too full
!   if (local_max_gen == Generation::Young && gc_cause() == GCCause::_gc_locker &&
        incremental_collection_will_fail(false /* don't consult_young */)) {
      if (PrintGCDetails) {
        gclog_or_tty->print_cr("GC locker: Trying a full collection "
                               "because scavenge failed");
      }
      // This time allow the old gen to be collected as well
      do_collection(true                 /* full */,
                    clear_all_soft_refs  /* clear_all_soft_refs */,
                    0                    /* size */,
                    false                /* is_tlab */,
!                   Generation::Old      /* max_gen */);
    }
  }
  
  bool GenCollectedHeap::is_in_young(oop p) {
!   bool result = ((HeapWord*)p) < _old_gen->reserved().start();
!   assert(result == _young_gen->is_in_reserved(p),
           err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
    return result;
  }
  
  // Returns "TRUE" iff "p" points into the committed areas of the heap.
*** 841,852 ****
           VMError::fatal_error_in_progress(), "too expensive");
  #endif
  
    // This might be sped up with a cache of the last generation that
    // answered yes.
!   for (int i = 0; i < _n_gens; i++) {
!     if (_gens[i]->is_in(p)) return true;
    }
  
    // Otherwise...
    return false;
  }
--- 814,825 ----
           VMError::fatal_error_in_progress(), "too expensive");
  #endif
  
    // This might be sped up with a cache of the last generation that
    // answered yes.
!   if (_young_gen->is_in(p) || _old_gen->is_in(p)) {
!     return true;
    }
  
    // Otherwise...
    return false;
  }
*** 854,971 ****
  // Don't implement this by using is_in_young(). This method is used
  // in some cases to check that is_in_young() is correct.
  bool GenCollectedHeap::is_in_partial_collection(const void* p) {
    assert(is_in_reserved(p) || p == NULL,
           "Does not work if address is non-null and outside of the heap");
!   return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
  }
  #endif
  
  void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->oop_iterate(cl);
!   }
  }
  
  void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->object_iterate(cl);
!   }
  }
  
  void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->safe_object_iterate(cl);
!   }
  }
  
  Space* GenCollectedHeap::space_containing(const void* addr) const {
!   for (int i = 0; i < _n_gens; i++) {
!     Space* res = _gens[i]->space_containing(addr);
!     if (res != NULL) return res;
    }
!   // Otherwise...
!   assert(false, "Could not find containing space");
!   return NULL;
  }
  
- 
  HeapWord* GenCollectedHeap::block_start(const void* addr) const {
    assert(is_in_reserved(addr), "block_start of address outside of heap");
!   for (int i = 0; i < _n_gens; i++) {
!     if (_gens[i]->is_in_reserved(addr)) {
!       assert(_gens[i]->is_in(addr),
!              "addr should be in allocated part of generation");
!       return _gens[i]->block_start(addr);
!     }
    }
!   assert(false, "Some generation should contain the address");
!   return NULL;
  }
  
  size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
    assert(is_in_reserved(addr), "block_size of address outside of heap");
!   for (int i = 0; i < _n_gens; i++) {
!     if (_gens[i]->is_in_reserved(addr)) {
!       assert(_gens[i]->is_in(addr),
!              "addr should be in allocated part of generation");
!       return _gens[i]->block_size(addr);
!     }
    }
!   assert(false, "Some generation should contain the address");
!   return 0;
  }
  
  bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
    assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
    assert(block_start(addr) == addr, "addr must be a block start");
!   for (int i = 0; i < _n_gens; i++) {
!     if (_gens[i]->is_in_reserved(addr)) {
!       return _gens[i]->block_is_obj(addr);
      }
!   }
!   assert(false, "Some generation should contain the address");
!   return false;
  }
  
  bool GenCollectedHeap::supports_tlab_allocation() const {
!   for (int i = 0; i < _n_gens; i += 1) {
!     if (_gens[i]->supports_tlab_allocation()) {
!       return true;
!     }
!   }
!   return false;
  }
  
  size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
!   size_t result = 0;
!   for (int i = 0; i < _n_gens; i += 1) {
!     if (_gens[i]->supports_tlab_allocation()) {
!       result += _gens[i]->tlab_capacity();
!     }
    }
!   return result;
  }
  
  size_t GenCollectedHeap::tlab_used(Thread* thr) const {
!   size_t result = 0;
!   for (int i = 0; i < _n_gens; i += 1) {
!     if (_gens[i]->supports_tlab_allocation()) {
!       result += _gens[i]->tlab_used();
!     }
    }
!   return result;
  }
  
  size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
!   size_t result = 0;
!   for (int i = 0; i < _n_gens; i += 1) {
!     if (_gens[i]->supports_tlab_allocation()) {
!       result += _gens[i]->unsafe_max_tlab_alloc();
      }
!   }
!   return result;
  }
  
  HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
    bool gc_overhead_limit_was_exceeded;
    return collector_policy()->mem_allocate_work(size /* size */,
--- 827,927 ----
  // Don't implement this by using is_in_young(). This method is used
  // in some cases to check that is_in_young() is correct.
  bool GenCollectedHeap::is_in_partial_collection(const void* p) {
    assert(is_in_reserved(p) || p == NULL,
           "Does not work if address is non-null and outside of the heap");
!   return p < _young_gen->reserved().end() && p != NULL;
  }
  #endif
  
  void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
!   _young_gen->oop_iterate(cl);
!   _old_gen->oop_iterate(cl);
  }
  
  void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
!   _young_gen->object_iterate(cl);
!   _old_gen->object_iterate(cl);
  }
  
  void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
!   _young_gen->safe_object_iterate(cl);
!   _old_gen->safe_object_iterate(cl);
  }
  
  Space* GenCollectedHeap::space_containing(const void* addr) const {
!   Space* res = _young_gen->space_containing(addr);
!   if (res != NULL) {
!     return res;
    }
!   res = _old_gen->space_containing(addr);
!   assert(res != NULL, "Could not find containing space");
!   return res;
  }
  
  HeapWord* GenCollectedHeap::block_start(const void* addr) const {
    assert(is_in_reserved(addr), "block_start of address outside of heap");
!   if (_young_gen->is_in_reserved(addr)) {
!     assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
!     return _young_gen->block_start(addr);
    }
! 
!   assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
!   assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
!   return _old_gen->block_start(addr);
  }
  
  size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
    assert(is_in_reserved(addr), "block_size of address outside of heap");
!   if (_young_gen->is_in_reserved(addr)) {
!     assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
!     return _young_gen->block_size(addr);
    }
! 
!   assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
!   assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
!   return _old_gen->block_size(addr);
  }
  
  bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
    assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
    assert(block_start(addr) == addr, "addr must be a block start");
!   if (_young_gen->is_in_reserved(addr)) {
!     return _young_gen->block_is_obj(addr);
    }
! 
!   assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
!   return _old_gen->block_is_obj(addr);
  }
  
  bool GenCollectedHeap::supports_tlab_allocation() const {
!   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
!   return _young_gen->supports_tlab_allocation();
  }
  
  size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
!   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
!   if (_young_gen->supports_tlab_allocation()) {
!     return _young_gen->tlab_capacity();
    }
!   return 0;
  }
  
  size_t GenCollectedHeap::tlab_used(Thread* thr) const {
!   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
!   if (_young_gen->supports_tlab_allocation()) {
!     return _young_gen->tlab_used();
    }
!   return 0;
  }
  
  size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
!   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
!   if (_young_gen->supports_tlab_allocation()) {
!     return _young_gen->unsafe_max_tlab_alloc();
    }
!   return 0;
  }
  
  HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
    bool gc_overhead_limit_was_exceeded;
    return collector_policy()->mem_allocate_work(size /* size */,
*** 1010,1030 ****
  }
  
  ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                                 size_t max_alloc_words) {
    ScratchBlock* res = NULL;
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
!   }
    sort_scratch_list(res);
    return res;
  }
  
  void GenCollectedHeap::release_scratch() {
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->reset_scratch();
!   }
  }
  
  class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
    void do_generation(Generation* gen) {
      gen->prepare_for_verify();
--- 966,984 ----
  }
  
  ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                                 size_t max_alloc_words) {
    ScratchBlock* res = NULL;
!   _young_gen->contribute_scratch(res, requestor, max_alloc_words);
!   _old_gen->contribute_scratch(res, requestor, max_alloc_words);
    sort_scratch_list(res);
    return res;
  }
  
  void GenCollectedHeap::release_scratch() {
!   _young_gen->reset_scratch();
!   _old_gen->reset_scratch();
  }
  
  class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
    void do_generation(Generation* gen) {
      gen->prepare_for_verify();
*** 1035,1119 ****
    ensure_parsability(false); // no need to retire TLABs
    GenPrepareForVerifyClosure blk;
    generation_iterate(&blk, false);
  }
  
- 
  void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                            bool old_to_young) {
    if (old_to_young) {
!     for (int i = _n_gens-1; i >= 0; i--) {
!       cl->do_generation(_gens[i]);
!     }
    } else {
!     for (int i = 0; i < _n_gens; i++) {
!       cl->do_generation(_gens[i]);
!     }
    }
  }
  
  void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->space_iterate(cl, true);
!   }
  }
  
  bool GenCollectedHeap::is_maximal_no_gc() const {
!   for (int i = 0; i < _n_gens; i++) {
!     if (!_gens[i]->is_maximal_no_gc()) {
!       return false;
!     }
!   }
!   return true;
  }
  
  void GenCollectedHeap::save_marks() {
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->save_marks();
!   }
  }
  
  GenCollectedHeap* GenCollectedHeap::heap() {
    assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
    assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
    return _gch;
  }
  
- 
  void GenCollectedHeap::prepare_for_compaction() {
-   guarantee(_n_gens = 2, "Wrong number of generations");
-   Generation* old_gen = _gens[1];
    // Start by compacting into same gen.
!   CompactPoint cp(old_gen);
!   old_gen->prepare_for_compaction(&cp);
!   Generation* young_gen = _gens[0];
!   young_gen->prepare_for_compaction(&cp);
! }
! 
! GCStats* GenCollectedHeap::gc_stats(int level) const {
!   return _gens[level]->gc_stats();
  }
  
  void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
-   for (int i = _n_gens-1; i >= 0; i--) {
-     Generation* g = _gens[i];
    if (!silent) {
!     gclog_or_tty->print("%s", g->name());
      gclog_or_tty->print(" ");
    }
!   g->verify();
  }
  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
  }
  
  void GenCollectedHeap::print_on(outputStream* st) const {
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->print_on(st);
!   }
    MetaspaceAux::print_on(st);
  }
  
  void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
    if (workers() != NULL) {
--- 989,1058 ----
    ensure_parsability(false); // no need to retire TLABs
    GenPrepareForVerifyClosure blk;
    generation_iterate(&blk, false);
  }
  
  void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                            bool old_to_young) {
    if (old_to_young) {
!     cl->do_generation(_old_gen);
!     cl->do_generation(_young_gen);
    } else {
!     cl->do_generation(_young_gen);
!     cl->do_generation(_old_gen);
    }
  }
  
  void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
!   _young_gen->space_iterate(cl, true);
!   _old_gen->space_iterate(cl, true);
  }
  
  bool GenCollectedHeap::is_maximal_no_gc() const {
!   return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
  }
  
  void GenCollectedHeap::save_marks() {
!   _young_gen->save_marks();
!   _old_gen->save_marks();
  }
  
  GenCollectedHeap* GenCollectedHeap::heap() {
    assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
    assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
    return _gch;
  }
  
  void GenCollectedHeap::prepare_for_compaction() {
    // Start by compacting into same gen.
!   CompactPoint cp(_old_gen);
!   _old_gen->prepare_for_compaction(&cp);
!   _young_gen->prepare_for_compaction(&cp);
  }
  
  void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
    if (!silent) {
!     gclog_or_tty->print("%s", _old_gen->name());
      gclog_or_tty->print(" ");
    }
!   _old_gen->verify();
! 
!   if (!silent) {
!     gclog_or_tty->print("%s", _young_gen->name());
!     gclog_or_tty->print(" ");
    }
+   _young_gen->verify();
+ 
    if (!silent) {
      gclog_or_tty->print("remset ");
    }
    rem_set()->verify();
  }
  
  void GenCollectedHeap::print_on(outputStream* st) const {
!   _young_gen->print_on(st);
!   _old_gen->print_on(st);
    MetaspaceAux::print_on(st);
  }
  
  void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
    if (workers() != NULL) {
*** 1148,1161 ****
  #endif // INCLUDE_ALL_GCS
  }
  
  void GenCollectedHeap::print_tracing_info() const {
    if (TraceYoungGenTime) {
!     get_gen(0)->print_summary_info();
    }
    if (TraceOldGenTime) {
!     get_gen(1)->print_summary_info();
    }
  }
  
  void GenCollectedHeap::print_heap_change(size_t prev_used) const {
    if (PrintGCDetails && Verbose) {
--- 1087,1100 ----
  #endif // INCLUDE_ALL_GCS
  }
  
  void GenCollectedHeap::print_tracing_info() const {
    if (TraceYoungGenTime) {
!     _young_gen->print_summary_info();
    }
    if (TraceOldGenTime) {
!     _old_gen->print_summary_info();
    }
  }
  
  void GenCollectedHeap::print_heap_change(size_t prev_used) const {
    if (PrintGCDetails && Verbose) {
*** 1257,1267 ****
  }
  
  oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                                oop obj,
                                                size_t obj_size) {
!   guarantee(old_gen->level() == 1, "We only get here with an old generation");
    assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
    HeapWord* result = NULL;
  
    result = old_gen->expand_and_allocate(obj_size, false);
  
--- 1196,1206 ----
  }
  
  oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                                oop obj,
                                                size_t obj_size) {
!   guarantee(old_gen == _old_gen, "We only get here with an old generation");
    assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
    HeapWord* result = NULL;
  
    result = old_gen->expand_and_allocate(obj_size, false);
  