src/share/vm/memory/genCollectedHeap.cpp
rev 7211 : [mq]: remove_ngen

*** 128,142 ****
    _rem_set = collector_policy()->create_rem_set(reserved_region(), n_covered_regions);
    set_barrier_set(rem_set()->bs());
  
    _gch = this;
  
!   for (i = 0; i < _n_gens; i++) {
!     ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
!     _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
!     heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
!   }
    clear_incremental_collection_failed();
  
  #if INCLUDE_ALL_GCS
    // If we are running CMS, create the collector responsible
    // for collecting the CMS generations.
--- 128,144 ----
    _rem_set = collector_policy()->create_rem_set(reserved_region(), n_covered_regions);
    set_barrier_set(rem_set()->bs());
  
    _gch = this;
  
!   ReservedSpace young_rs = heap_rs.first_part(_gen_specs[0]->max_size(), false, false);
!   _young_gen = _gen_specs[0]->init(young_rs, 0, rem_set());
!   heap_rs = heap_rs.last_part(_gen_specs[0]->max_size());
!
!   ReservedSpace old_rs = heap_rs.first_part(_gen_specs[1]->max_size(), false, false);
!   _old_gen = _gen_specs[1]->init(old_rs, 1, rem_set());
!   heap_rs = heap_rs.last_part(_gen_specs[1]->max_size());
    clear_incremental_collection_failed();
  
  #if INCLUDE_ALL_GCS
    // If we are running CMS, create the collector responsible
    // for collecting the CMS generations.
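Reviewer note: the generation-spec loop is unrolled into an explicit young/old carving of the single contiguous reservation. Below is a standalone toy sketch of that carving (ToyReservedSpace is a stand-in for illustration, not HotSpot's ReservedSpace API); it also checks the layout invariant that is_in_young() relies on later in this patch.

    // Toy model of first_part/last_part carving; not HotSpot code.
    #include <cassert>
    #include <cstddef>

    struct ToyReservedSpace {
      char*  _base;
      size_t _size;
      // Return the first 'bytes' of the reservation.
      ToyReservedSpace first_part(size_t bytes) const {
        assert(bytes <= _size);
        return ToyReservedSpace{_base, bytes};
      }
      // Return everything after the first 'bytes'.
      ToyReservedSpace last_part(size_t bytes) const {
        assert(bytes <= _size);
        return ToyReservedSpace{_base + bytes, _size - bytes};
      }
    };

    int main() {
      char heap[1024];
      ToyReservedSpace heap_rs{heap, sizeof(heap)};

      // Same carving order as the patch: young first, old from the remainder.
      ToyReservedSpace young_rs = heap_rs.first_part(256);
      heap_rs = heap_rs.last_part(256);
      ToyReservedSpace old_rs = heap_rs.first_part(768);

      // Layout invariant: the young generation sits directly below the old one.
      assert(young_rs._base + young_rs._size == old_rs._base);
      return 0;
    }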
*** 147,157 ****
  #endif // INCLUDE_ALL_GCS
  
    return JNI_OK;
  }
  
- 
  char* GenCollectedHeap::allocate(size_t alignment,
                                   size_t* _total_reserved,
                                   int* _n_covered_regions,
                                   ReservedSpace* heap_rs){
    const char overflow_msg[] = "The size of the object heap + VM data exceeds "
--- 149,158 ----
*** 185,195 ****
    *heap_rs = Universe::reserve_heap(total_reserved, alignment);
  
    return heap_rs->base();
  }
  
- 
  void GenCollectedHeap::post_initialize() {
    SharedHeap::post_initialize();
    GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
    guarantee(policy->is_generation_policy(), "Illegal policy type");
    DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
--- 186,195 ----
*** 208,252 ****
    policy->initialize_gc_policy_counters();
  }
  
  void GenCollectedHeap::ref_processing_init() {
    SharedHeap::ref_processing_init();
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->ref_processor_init();
!   }
  }
  
  size_t GenCollectedHeap::capacity() const {
!   size_t res = 0;
!   for (int i = 0; i < _n_gens; i++) {
!     res += _gens[i]->capacity();
!   }
!   return res;
  }
  
  size_t GenCollectedHeap::used() const {
!   size_t res = 0;
!   for (int i = 0; i < _n_gens; i++) {
!     res += _gens[i]->used();
!   }
!   return res;
  }
  
  // Save the "used_region" for generations level and lower.
  void GenCollectedHeap::save_used_regions(int level) {
    assert(level < _n_gens, "Illegal level parameter");
!   for (int i = level; i >= 0; i--) {
!     _gens[i]->save_used_region();
    }
  }
  
  size_t GenCollectedHeap::max_capacity() const {
!   size_t res = 0;
!   for (int i = 0; i < _n_gens; i++) {
!     res += _gens[i]->max_capacity();
!   }
!   return res;
  }
  
  // Update the _full_collections_completed counter
  // at the end of a stop-world full GC.
  unsigned int GenCollectedHeap::update_full_collections_completed() {
--- 208,240 ----
    policy->initialize_gc_policy_counters();
  }
  
  void GenCollectedHeap::ref_processing_init() {
    SharedHeap::ref_processing_init();
!   _young_gen->ref_processor_init();
!   _old_gen->ref_processor_init();
  }
  
  size_t GenCollectedHeap::capacity() const {
!   return _young_gen->capacity() + _old_gen->capacity();
  }
  
  size_t GenCollectedHeap::used() const {
!   return _young_gen->used() + _old_gen->used();
  }
  
  // Save the "used_region" for generations level and lower.
  void GenCollectedHeap::save_used_regions(int level) {
    assert(level < _n_gens, "Illegal level parameter");
!   if (level == 1) {
!     _old_gen->save_used_region();
    }
+   _young_gen->save_used_region();
  }
  
  size_t GenCollectedHeap::max_capacity() const {
!   return _young_gen->max_capacity() + _old_gen->max_capacity();
  }
  
  // Update the _full_collections_completed counter
  // at the end of a stop-world full GC.
  unsigned int GenCollectedHeap::update_full_collections_completed() {
--- 208,240 ----
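Reviewer note: a quick standalone equivalence check (toy code, not HotSpot) that the new straight-line save_used_regions visits the same generations, in the same order, as the removed loop for both legal levels (0 = young, 1 = old):

    #include <cassert>
    #include <vector>

    int main() {
      for (int level = 0; level <= 1; ++level) {
        std::vector<int> old_form, new_form;
        // Old shape: for (i = level; i >= 0; i--) save(i)
        for (int i = level; i >= 0; --i) old_form.push_back(i);
        // New shape: if (level == 1) save(old); save(young)
        if (level == 1) new_form.push_back(1);
        new_form.push_back(0);
        assert(old_form == new_form);
      }
      return 0;
    }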
*** 306,325 ****
  #endif
  
  HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                                 bool is_tlab,
                                                 bool first_only) {
!   HeapWord* res;
!   for (int i = 0; i < _n_gens; i++) {
!     if (_gens[i]->should_allocate(size, is_tlab)) {
!       res = _gens[i]->allocate(size, is_tlab);
!       if (res != NULL) return res;
!       else if (first_only) break;
      }
    }
!   // Otherwise...
!   return NULL;
  }
  
  HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                           bool* gc_overhead_limit_was_exceeded) {
    return collector_policy()->mem_allocate_work(size,
--- 294,317 ----
  #endif
  
  HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                                 bool is_tlab,
                                                 bool first_only) {
!   HeapWord* res = NULL;
!
!   if (_young_gen->should_allocate(size, is_tlab)) {
!     res = _young_gen->allocate(size, is_tlab);
!     if (res != NULL || first_only) {
!       return res;
      }
    }
!
!   if (_old_gen->should_allocate(size, is_tlab)) {
!     res = _old_gen->allocate(size, is_tlab);
!   }
!
!   return res;
  }
  
  HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                           bool* gc_overhead_limit_was_exceeded) {
    return collector_policy()->mem_allocate_work(size,
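Reviewer note: the unrolled attempt_allocation preserves the loop's semantics: on a young-gen failure with first_only set, the old generation is never consulted. A minimal standalone model of the new control flow (Gen and its methods are illustrative stand-ins, not the HotSpot Generation API):

    #include <cassert>
    #include <cstddef>

    // Stand-in for Generation: 'ok' controls whether allocate() succeeds.
    struct Gen {
      bool ok;
      bool  should_allocate(size_t, bool) const { return true; }
      void* allocate(size_t, bool) { static char word; return ok ? &word : nullptr; }
    };

    void* attempt_allocation(Gen& young, Gen& old_gen,
                             size_t size, bool is_tlab, bool first_only) {
      void* res = nullptr;
      if (young.should_allocate(size, is_tlab)) {
        res = young.allocate(size, is_tlab);
        if (res != nullptr || first_only) {
          return res;  // success, or the caller wanted the first generation only
        }
      }
      if (old_gen.should_allocate(size, is_tlab)) {
        res = old_gen.allocate(size, is_tlab);  // old generation as fallback
      }
      return res;
    }

    int main() {
      Gen young{false}, old_gen{true};
      // first_only returns before the old generation is tried:
      assert(attempt_allocation(young, old_gen, 64, false, true) == nullptr);
      // otherwise the old generation serves the request:
      assert(attempt_allocation(young, old_gen, 64, false, false) != nullptr);
      return 0;
    }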
*** 335,460 ****
    return UseConcMarkSweepGC &&
           ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
            (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
  }
  
! void GenCollectedHeap::do_collection(bool   full,
!                                      bool   clear_all_soft_refs,
!                                      size_t size,
!                                      bool   is_tlab,
!                                      int    max_level) {
!   bool prepared_for_verification = false;
!   ResourceMark rm;
!   DEBUG_ONLY(Thread* my_thread = Thread::current();)
!
!   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
!   assert(my_thread->is_VM_thread() ||
!          my_thread->is_ConcurrentGC_thread(),
!          "incorrect thread type capability");
!   assert(Heap_lock->is_locked(),
!          "the requesting thread should have the Heap_lock");
!   guarantee(!is_gc_active(), "collection is not reentrant");
!   assert(max_level < n_gens(), "sanity check");
!
!   if (GC_locker::check_active_before_gc()) {
!     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
!   }
!
!   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
!                           collector_policy()->should_clear_all_soft_refs();
!
!   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
!
!   const size_t metadata_prev_used = MetaspaceAux::used_bytes();
!
!   print_heap_before_gc();
!
!   {
!     FlagSetting fl(_is_gc_active, true);
!
!     bool complete = full && (max_level == (n_gens()-1));
!     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
!     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
!     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
!     // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
!     // so we can assume here that the next GC id is what we want.
!     GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
!
!     gc_prologue(complete);
!     increment_total_collections(complete);
!
!     size_t gch_prev_used = used();
!
!     int starting_level = 0;
!     if (full) {
!       // Search for the oldest generation which will collect all younger
!       // generations, and start collection loop there.
!       for (int i = max_level; i >= 0; i--) {
!         if (_gens[i]->full_collects_younger_generations()) {
!           starting_level = i;
!           break;
!         }
!       }
!     }
!
!     bool must_restore_marks_for_biased_locking = false;
!
!     int max_level_collected = starting_level;
!     for (int i = starting_level; i <= max_level; i++) {
!       if (_gens[i]->should_collect(full, size, is_tlab)) {
!         if (i == n_gens() - 1) {  // a major collection is to happen
!           if (!complete) {
!             // The full_collections increment was missed above.
!             increment_total_full_collections();
!           }
!           pre_full_gc_dump(NULL);    // do any pre full gc dumps
!         }
        // Timer for individual generations. Last argument is false: no CR
        // FIXME: We should try to start the timing earlier to cover more of the GC pause
        // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
        // so we can assume here that the next GC id is what we want.
!       GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek());
!       TraceCollectorStats tcs(_gens[i]->counters());
!       TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());
!
!       size_t prev_used = _gens[i]->used();
!       _gens[i]->stat_record()->invocations++;
!       _gens[i]->stat_record()->accumulated_time.start();
  
        // Must be done anew before each collection because
        // a previous collection will do mangling and will
        // change top of some spaces.
        record_gen_tops_before_GC();
  
        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
!                     i,
!                     _gens[i]->stat_record()->invocations,
!                     size*HeapWordSize);
        }
  
!       if (VerifyBeforeGC && i >= VerifyGCLevel &&
!           total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
-         if (!prepared_for_verification) {
-           prepare_for_verify();
-           prepared_for_verification = true;
-         }
          Universe::verify(" VerifyBeforeGC:");
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());
  
-       if (!must_restore_marks_for_biased_locking &&
-           _gens[i]->performs_in_place_marking()) {
-         // We perform this mark word preservation work lazily
-         // because it's only at this point that we know whether we
-         // absolutely have to do it; we want to avoid doing it for
-         // scavenge-only collections where it's unnecessary
-         must_restore_marks_for_biased_locking = true;
-         BiasedLocking::preserve_marks();
-       }
-
        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueuing) refs discovery.
          // In the future this should be moved into the generation's
--- 327,368 ----
    return UseConcMarkSweepGC &&
           ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
            (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
  }
  
! void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
!                                           bool is_tlab, bool run_verification, bool clear_soft_refs) {
    // Timer for individual generations. Last argument is false: no CR
    // FIXME: We should try to start the timing earlier to cover more of the GC pause
    // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
    // so we can assume here that the next GC id is what we want.
!   GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek());
!   TraceCollectorStats tcs(gen->counters());
!   TraceMemoryManagerStats tmms(gen->kind(),gc_cause());
!
!   size_t prev_used = gen->used();
!   gen->stat_record()->invocations++;
!   gen->stat_record()->accumulated_time.start();
  
    // Must be done anew before each collection because
    // a previous collection will do mangling and will
    // change top of some spaces.
    record_gen_tops_before_GC();
  
    if (PrintGC && Verbose) {
      gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
!                 gen->level(),
!                 gen->stat_record()->invocations,
!                 size * HeapWordSize);
    }
  
!   if (run_verification && VerifyBeforeGC) {
      HandleMark hm;  // Discard invalid handles created during verification
      Universe::verify(" VerifyBeforeGC:");
    }
    COMPILER2_PRESENT(DerivedPointerTable::clear());
  
    // Do collection work
    {
      // Note on ref discovery: For what appear to be historical reasons,
      // GCH enables and disables (by enqueuing) refs discovery.
      // In the future this should be moved into the generation's
*** 470,525 ****
        // We want to discover references, but not process them yet.
        // This mode is disabled in process_discovered_references if the
        // generation does some collection work, or in
        // enqueue_discovered_references if the generation returns
        // without doing any work.
!       ReferenceProcessor* rp = _gens[i]->ref_processor();
        // If the discovery of ("weak") refs in this generation is
        // atomic wrt other collectors in this configuration, we
        // are guaranteed to have empty discovered ref lists.
        if (rp->discovery_is_atomic()) {
          rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
!         rp->setup_policy(do_clear_all_soft_refs);
        } else {
          // collect() below will enable discovery as appropriate
        }
!       _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
        if (!rp->enqueuing_is_done()) {
          rp->enqueue_discovered_references();
        } else {
          rp->set_enqueuing_is_done(false);
        }
        rp->verify_no_references_recorded();
      }
-     max_level_collected = i;
  
      // Determine if allocation request was met.
      if (size > 0) {
!       if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
!         if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
            size = 0;
          }
        }
      }
  
      COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  
!     _gens[i]->stat_record()->accumulated_time.stop();
!     update_gc_stats(i, full);
  
!     if (VerifyAfterGC && i >= VerifyGCLevel &&
!         total_collections() >= VerifyGCStartAt) {
        HandleMark hm;  // Discard invalid handles created during verification
        Universe::verify(" VerifyAfterGC:");
      }
  
      if (PrintGCDetails) {
        gclog_or_tty->print(":");
!       _gens[i]->print_heap_change(prev_used);
      }
    }
  }
  
    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
--- 378,511 ----
      // We want to discover references, but not process them yet.
      // This mode is disabled in process_discovered_references if the
      // generation does some collection work, or in
      // enqueue_discovered_references if the generation returns
      // without doing any work.
!     ReferenceProcessor* rp = gen->ref_processor();
      // If the discovery of ("weak") refs in this generation is
      // atomic wrt other collectors in this configuration, we
      // are guaranteed to have empty discovered ref lists.
      if (rp->discovery_is_atomic()) {
        rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
!       rp->setup_policy(clear_soft_refs);
      } else {
        // collect() below will enable discovery as appropriate
      }
!     gen->collect(full, clear_soft_refs, size, is_tlab);
      if (!rp->enqueuing_is_done()) {
        rp->enqueue_discovered_references();
      } else {
        rp->set_enqueuing_is_done(false);
      }
      rp->verify_no_references_recorded();
    }
  
    // Determine if allocation request was met.
    if (size > 0) {
!     if (!is_tlab || gen->supports_tlab_allocation()) {
!       if (size * HeapWordSize <= gen->unsafe_max_alloc_nogc()) {
          size = 0;
        }
      }
    }
  
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  
!   gen->stat_record()->accumulated_time.stop();
!   update_gc_stats(gen->level(), full);
  
!   if (run_verification && VerifyAfterGC) {
      HandleMark hm;  // Discard invalid handles created during verification
      Universe::verify(" VerifyAfterGC:");
    }
  
    if (PrintGCDetails) {
      gclog_or_tty->print(":");
!     gen->print_heap_change(prev_used);
    }
+ }
+
+ void GenCollectedHeap::do_collection(bool   full,
+                                      bool   clear_all_soft_refs,
+                                      size_t size,
+                                      bool   is_tlab,
+                                      int    max_level) {
+   ResourceMark rm;
+   DEBUG_ONLY(Thread* my_thread = Thread::current();)
+
+   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
+   assert(my_thread->is_VM_thread() ||
+          my_thread->is_ConcurrentGC_thread(),
+          "incorrect thread type capability");
+   assert(Heap_lock->is_locked(),
+          "the requesting thread should have the Heap_lock");
+   guarantee(!is_gc_active(), "collection is not reentrant");
+   assert(max_level < n_gens(), "sanity check");
+
+   if (GC_locker::check_active_before_gc()) {
+     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
+   }
+
+   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
+                           collector_policy()->should_clear_all_soft_refs();
+
+   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
+
+   const size_t metadata_prev_used = MetaspaceAux::used_bytes();
+
+   print_heap_before_gc();
+
+   {
+     FlagSetting fl(_is_gc_active, true);
+
+     bool complete = full && (max_level == (n_gens()-1));
+     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
+     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
+     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+     // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
+     // so we can assume here that the next GC id is what we want.
+     GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());
+
+     gc_prologue(complete);
+     increment_total_collections(complete);
+
+     size_t gch_prev_used = used();
+     bool must_restore_marks_for_biased_locking = false;
+     bool run_verification = total_collections() >= VerifyGCStartAt;
+
+     if (_young_gen->performs_in_place_marking() ||
+         _old_gen->performs_in_place_marking()) {
+       // We want to avoid doing this for
+       // scavenge-only collections where it's unnecessary.
+       must_restore_marks_for_biased_locking = true;
+       BiasedLocking::preserve_marks();
+     }
+
+     bool prepared_for_verification = false;
+     int max_level_collected = 0;
+     if (!(full && _old_gen->full_collects_younger_generations()) &&
+         _young_gen->should_collect(full, size, is_tlab)) {
+       if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
+         prepare_for_verify();
+         prepared_for_verification = true;
+       }
+       collect_generation(_young_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 0, do_clear_all_soft_refs);
+     }
+     if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) {
+       if (!complete) {
+         // The full_collections increment was missed above.
+         increment_total_full_collections();
+       }
+       pre_full_gc_dump(NULL);    // do any pre full gc dumps
+       if (run_verification && VerifyGCLevel <= 1 && VerifyBeforeGC) {
+         if (!prepared_for_verification) {
+           prepare_for_verify();
+         }
+       }
+       collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs);
+       max_level_collected = 1;
+     }
  
    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
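Reviewer note: the scattered per-iteration checks `i >= VerifyGCLevel && total_collections() >= VerifyGCStartAt` are now computed once as run_verification plus a per-call `VerifyGCLevel <= level` gate. A standalone toy check (assumed semantics, not HotSpot code) that the two forms agree:

    #include <cassert>

    // Old form: verify generation 'level' when level >= VerifyGCLevel and
    // enough collections have happened.
    bool old_gate(int level, int verify_gc_level, unsigned total, unsigned start_at) {
      return level >= verify_gc_level && total >= start_at;
    }

    // New form: run_verification computed once, then a per-generation gate.
    bool new_gate(int level, int verify_gc_level, unsigned total, unsigned start_at) {
      bool run_verification = total >= start_at;
      return run_verification && verify_gc_level <= level;
    }

    int main() {
      for (int level = 0; level <= 1; ++level)
        for (int vgl = -1; vgl <= 2; ++vgl)
          for (unsigned total = 0; total <= 2; ++total)
            assert(old_gate(level, vgl, total, 1) == new_gate(level, vgl, total, 1));
      return 0;
    }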
*** 537,550 ****
      if (complete) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }
  
-   for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
!     _gens[j]->compute_new_size();
    }
  
    if (complete) {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();
      MetaspaceAux::verify_metrics();
--- 523,537 ----
      if (complete) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }
  
    // Adjust generation sizes.
!   if (max_level_collected == 1) {
!     _old_gen->compute_new_size();
    }
+   _young_gen->compute_new_size();
  
    if (complete) {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();
      MetaspaceAux::verify_metrics();
*** 597,618 ****
                                 cld_closure, weak_cld_closure,
                                 code_closure);
  
    if (younger_gens_as_roots) {
      if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
!       for (int i = 0; i < level; i++) {
!         not_older_gens->set_generation(_gens[i]);
!         _gens[i]->oop_iterate(not_older_gens);
        }
        not_older_gens->reset_generation();
      }
    }
    // When collection is parallel, all threads get to cooperate to do
    // older-gen scanning.
!   for (int i = level+1; i < _n_gens; i++) {
!     older_gens->set_generation(_gens[i]);
!     rem_set()->younger_refs_iterate(_gens[i], older_gens);
      older_gens->reset_generation();
    }
  
    _gen_process_roots_tasks->all_tasks_completed();
  }
--- 584,605 ----
                                 cld_closure, weak_cld_closure,
                                 code_closure);
  
    if (younger_gens_as_roots) {
      if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
!       if (level == 1) {
!         not_older_gens->set_generation(_young_gen);
!         _young_gen->oop_iterate(not_older_gens);
        }
        not_older_gens->reset_generation();
      }
    }
    // When collection is parallel, all threads get to cooperate to do
    // older-gen scanning.
!   if (level == 0) {
!     older_gens->set_generation(_old_gen);
!     rem_set()->younger_refs_iterate(_old_gen, older_gens);
      older_gens->reset_generation();
    }
  
    _gen_process_roots_tasks->all_tasks_completed();
  }
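Reviewer note: with exactly two generations, `level` is the generation being collected, so the old loops degenerate to the two branches above: collecting old (level 1) walks the whole young generation as roots, while collecting young (level 0) reaches old-to-young pointers through the remembered set. A standalone sketch of that dispatch (the helpers are hypothetical, not HotSpot calls):

    #include <cstdio>

    // Hypothetical stand-ins for the young-gen walk and remembered-set scan.
    void iterate_young_as_roots()    { std::puts("walk all of young"); }
    void scan_old_to_young_rem_set() { std::puts("scan dirty old-gen cards"); }

    // Mirrors gen_process_roots: 'level' is the generation being collected.
    void process_other_generation(int level) {
      if (level == 1) {
        iterate_young_as_roots();     // old collection: young scanned wholesale
      } else {
        scan_old_to_young_rem_set();  // young collection: only recorded old->young refs
      }
    }

    int main() {
      process_other_generation(0);
      process_other_generation(1);
      return 0;
    }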
*** 649,695 ****
  }
  
  void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
    SharedHeap::process_weak_roots(root_closure);
    // "Local" "weak" refs
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->ref_processor()->weak_oops_do(root_closure);
!   }
  }
  
  #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
  void GenCollectedHeap::                                                 \
  oop_since_save_marks_iterate(int level,                                 \
                               OopClosureType* cur,                       \
                               OopClosureType* older) {                   \
!   _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
!   for (int i = level+1; i < n_gens(); i++) {                            \
!     _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
    }                                                                     \
  }
  
  ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
  
  #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
  
  bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
!   for (int i = level; i < _n_gens; i++) {
!     if (!_gens[i]->no_allocs_since_save_marks()) return false;
    }
    return true;
  }
  
  bool GenCollectedHeap::supports_inline_contig_alloc() const {
!   return _gens[0]->supports_inline_contig_alloc();
  }
  
  HeapWord** GenCollectedHeap::top_addr() const {
!   return _gens[0]->top_addr();
  }
  
  HeapWord** GenCollectedHeap::end_addr() const {
!   return _gens[0]->end_addr();
  }
  
  // public collection interfaces
  
  void GenCollectedHeap::collect(GCCause::Cause cause) {
--- 636,684 ----
  }
  
  void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
    SharedHeap::process_weak_roots(root_closure);
    // "Local" "weak" refs
!   _young_gen->ref_processor()->weak_oops_do(root_closure);
!   _old_gen->ref_processor()->weak_oops_do(root_closure);
  }
  
  #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
  void GenCollectedHeap::                                                 \
  oop_since_save_marks_iterate(int level,                                 \
                               OopClosureType* cur,                       \
                               OopClosureType* older) {                   \
!   if (level == 0) {                                                     \
!     _young_gen->oop_since_save_marks_iterate##nv_suffix(cur);           \
!     _old_gen->oop_since_save_marks_iterate##nv_suffix(older);           \
!   } else {                                                              \
!     _old_gen->oop_since_save_marks_iterate##nv_suffix(cur);             \
    }                                                                     \
  }
  
  ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
  
  #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
  
  bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
!   if (level == 0) {
!     if (!_young_gen->no_allocs_since_save_marks()) return false;
    }
+   if (!_old_gen->no_allocs_since_save_marks()) return false;
    return true;
  }
  
  bool GenCollectedHeap::supports_inline_contig_alloc() const {
!   return _young_gen->supports_inline_contig_alloc();
  }
  
  HeapWord** GenCollectedHeap::top_addr() const {
!   return _young_gen->top_addr();
  }
  
  HeapWord** GenCollectedHeap::end_addr() const {
!   return _young_gen->end_addr();
  }
  
  // public collection interfaces
  
  void GenCollectedHeap::collect(GCCause::Cause cause) {
*** 748,763 ****
  }
  
  #if INCLUDE_ALL_GCS
  bool GenCollectedHeap::create_cms_collector() {
  
!   assert(_gens[1]->kind() == Generation::ConcurrentMarkSweep,
           "Unexpected generation kinds");
    // Skip two header words in the block content verification
    NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
    CMSCollector* collector = new CMSCollector(
!     (ConcurrentMarkSweepGeneration*)_gens[1],
      _rem_set->as_CardTableRS(),
      (ConcurrentMarkSweepPolicy*) collector_policy());
  
    if (collector == NULL || !collector->completed_initialization()) {
      if (collector) {
--- 737,752 ----
  }
  
  #if INCLUDE_ALL_GCS
  bool GenCollectedHeap::create_cms_collector() {
  
!   assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
           "Unexpected generation kinds");
    // Skip two header words in the block content verification
    NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
    CMSCollector* collector = new CMSCollector(
!     (ConcurrentMarkSweepGeneration*)_old_gen,
      _rem_set->as_CardTableRS(),
      (ConcurrentMarkSweepPolicy*) collector_policy());
  
    if (collector == NULL || !collector->completed_initialization()) {
      if (collector) {
*** 820,831 ****
                             n_gens() - 1 /* max_level */);
    }
  }
  
  bool GenCollectedHeap::is_in_young(oop p) {
!   bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
!   assert(result == _gens[0]->is_in_reserved(p),
           err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
    return result;
  }
  
  // Returns "TRUE" iff "p" points into the committed areas of the heap.
--- 809,820 ----
                             n_gens() - 1 /* max_level */);
    }
  }
  
  bool GenCollectedHeap::is_in_young(oop p) {
!   bool result = ((HeapWord*)p) < _old_gen->reserved().start();
!   assert(result == _young_gen->is_in_reserved(p),
           err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p)));
    return result;
  }
  
  // Returns "TRUE" iff "p" points into the committed areas of the heap.
*** 841,852 ****
           VMError::fatal_error_in_progress(), "too expensive");
  #endif
    // This might be sped up with a cache of the last generation that
    // answered yes.
!   for (int i = 0; i < _n_gens; i++) {
!     if (_gens[i]->is_in(p)) return true;
    }
    // Otherwise...
    return false;
  }
--- 830,841 ----
           VMError::fatal_error_in_progress(), "too expensive");
  #endif
    // This might be sped up with a cache of the last generation that
    // answered yes.
!   if (_young_gen->is_in(p) || _old_gen->is_in(p)) {
!     return true;
    }
    // Otherwise...
    return false;
  }
*** 854,971 ****
  // Don't implement this by using is_in_young(). This method is used
  // in some cases to check that is_in_young() is correct.
  bool GenCollectedHeap::is_in_partial_collection(const void* p) {
    assert(is_in_reserved(p) || p == NULL,
           "Does not work if address is non-null and outside of the heap");
!   return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
  }
  #endif
  
  void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->oop_iterate(cl);
!   }
  }
  
  void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->object_iterate(cl);
!   }
  }
  
  void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->safe_object_iterate(cl);
!   }
  }
  
  Space* GenCollectedHeap::space_containing(const void* addr) const {
!   for (int i = 0; i < _n_gens; i++) {
!     Space* res = _gens[i]->space_containing(addr);
!     if (res != NULL) return res;
    }
!   // Otherwise...
!   assert(false, "Could not find containing space");
!   return NULL;
  }
  
- 
  HeapWord* GenCollectedHeap::block_start(const void* addr) const {
    assert(is_in_reserved(addr), "block_start of address outside of heap");
!   for (int i = 0; i < _n_gens; i++) {
!     if (_gens[i]->is_in_reserved(addr)) {
!       assert(_gens[i]->is_in(addr),
!              "addr should be in allocated part of generation");
!       return _gens[i]->block_start(addr);
!     }
    }
!   assert(false, "Some generation should contain the address");
!   return NULL;
  }
  
  size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
    assert(is_in_reserved(addr), "block_size of address outside of heap");
!   for (int i = 0; i < _n_gens; i++) {
!     if (_gens[i]->is_in_reserved(addr)) {
!       assert(_gens[i]->is_in(addr),
!              "addr should be in allocated part of generation");
!       return _gens[i]->block_size(addr);
      }
!   }
!   assert(false, "Some generation should contain the address");
!   return 0;
  }
  
  bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
    assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
    assert(block_start(addr) == addr, "addr must be a block start");
!   for (int i = 0; i < _n_gens; i++) {
!     if (_gens[i]->is_in_reserved(addr)) {
!       return _gens[i]->block_is_obj(addr);
!     }
    }
!   assert(false, "Some generation should contain the address");
!   return false;
  }
  
  bool GenCollectedHeap::supports_tlab_allocation() const {
!   for (int i = 0; i < _n_gens; i += 1) {
!     if (_gens[i]->supports_tlab_allocation()) {
!       return true;
!     }
!   }
!   return false;
  }
  
  size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
!   size_t result = 0;
!   for (int i = 0; i < _n_gens; i += 1) {
!     if (_gens[i]->supports_tlab_allocation()) {
!       result += _gens[i]->tlab_capacity();
!     }
    }
!   return result;
  }
  
  size_t GenCollectedHeap::tlab_used(Thread* thr) const {
!   size_t result = 0;
!   for (int i = 0; i < _n_gens; i += 1) {
!     if (_gens[i]->supports_tlab_allocation()) {
!       result += _gens[i]->tlab_used();
!     }
    }
!   return result;
  }
  
  size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
!   size_t result = 0;
!   for (int i = 0; i < _n_gens; i += 1) {
!     if (_gens[i]->supports_tlab_allocation()) {
!       result += _gens[i]->unsafe_max_tlab_alloc();
!     }
    }
!   return result;
  }
  
  HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
    bool gc_overhead_limit_was_exceeded;
    return collector_policy()->mem_allocate_work(size /* size */,
--- 843,943 ----
  // Don't implement this by using is_in_young(). This method is used
  // in some cases to check that is_in_young() is correct.
  bool GenCollectedHeap::is_in_partial_collection(const void* p) {
    assert(is_in_reserved(p) || p == NULL,
           "Does not work if address is non-null and outside of the heap");
!   return p < _young_gen->reserved().end() && p != NULL;
  }
  #endif
  
  void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
!   _young_gen->oop_iterate(cl);
!   _old_gen->oop_iterate(cl);
  }
  
  void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
!   _young_gen->object_iterate(cl);
!   _old_gen->object_iterate(cl);
  }
  
  void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
!   _young_gen->safe_object_iterate(cl);
!   _old_gen->safe_object_iterate(cl);
  }
  
  Space* GenCollectedHeap::space_containing(const void* addr) const {
!   Space* res = _young_gen->space_containing(addr);
!   if (res != NULL) {
!     return res;
    }
!   res = _old_gen->space_containing(addr);
!   assert(res != NULL, "Could not find containing space");
!   return res;
  }
  
  HeapWord* GenCollectedHeap::block_start(const void* addr) const {
    assert(is_in_reserved(addr), "block_start of address outside of heap");
!   if (_young_gen->is_in_reserved(addr)) {
!     assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
!     return _young_gen->block_start(addr);
    }
!
!   assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
!   assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
!   return _old_gen->block_start(addr);
  }
  
  size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
    assert(is_in_reserved(addr), "block_size of address outside of heap");
!   if (_young_gen->is_in_reserved(addr)) {
!     assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
!     return _young_gen->block_size(addr);
    }
!
!   assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
!   assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
!   return _old_gen->block_size(addr);
  }
  
  bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
    assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
    assert(block_start(addr) == addr, "addr must be a block start");
!   if (_young_gen->is_in_reserved(addr)) {
!     return _young_gen->block_is_obj(addr);
    }
!
!   assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
!   return _old_gen->block_is_obj(addr);
  }
  
  bool GenCollectedHeap::supports_tlab_allocation() const {
!   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
!   return _young_gen->supports_tlab_allocation();
  }
  
  size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
!   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
!   if (_young_gen->supports_tlab_allocation()) {
!     return _young_gen->tlab_capacity();
    }
!   return 0;
  }
  
  size_t GenCollectedHeap::tlab_used(Thread* thr) const {
!   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
!   if (_young_gen->supports_tlab_allocation()) {
!     return _young_gen->tlab_used();
    }
!   return 0;
  }
  
  size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
!   assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
!   if (_young_gen->supports_tlab_allocation()) {
!     return _young_gen->unsafe_max_tlab_alloc();
    }
!   return 0;
  }
  
  HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
    bool gc_overhead_limit_was_exceeded;
    return collector_policy()->mem_allocate_work(size /* size */,
*** 1010,1030 ****
  }
  
  ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                                 size_t max_alloc_words) {
    ScratchBlock* res = NULL;
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
!   }
    sort_scratch_list(res);
    return res;
  }
  
  void GenCollectedHeap::release_scratch() {
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->reset_scratch();
!   }
  }
  
  class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
    void do_generation(Generation* gen) {
      gen->prepare_for_verify();
--- 982,1000 ----
  }
  
  ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                                 size_t max_alloc_words) {
    ScratchBlock* res = NULL;
!   _young_gen->contribute_scratch(res, requestor, max_alloc_words);
!   _old_gen->contribute_scratch(res, requestor, max_alloc_words);
    sort_scratch_list(res);
    return res;
  }
  
  void GenCollectedHeap::release_scratch() {
!   _young_gen->reset_scratch();
!   _old_gen->reset_scratch();
  }
  
  class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
    void do_generation(Generation* gen) {
      gen->prepare_for_verify();
*** 1035,1077 ****
    ensure_parsability(false);   // no need to retire TLABs
    GenPrepareForVerifyClosure blk;
    generation_iterate(&blk, false);
  }
  
- 
  void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                            bool old_to_young) {
    if (old_to_young) {
!     for (int i = _n_gens-1; i >= 0; i--) {
!       cl->do_generation(_gens[i]);
!     }
    } else {
!     for (int i = 0; i < _n_gens; i++) {
!       cl->do_generation(_gens[i]);
!     }
    }
  }
  
  void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->space_iterate(cl, true);
!   }
  }
  
  bool GenCollectedHeap::is_maximal_no_gc() const {
!   for (int i = 0; i < _n_gens; i++) {
!     if (!_gens[i]->is_maximal_no_gc()) {
!       return false;
!     }
!   }
!   return true;
  }
  
  void GenCollectedHeap::save_marks() {
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->save_marks();
!   }
  }
  
  GenCollectedHeap* GenCollectedHeap::heap() {
    assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
    assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
--- 1005,1037 ----
    ensure_parsability(false);   // no need to retire TLABs
    GenPrepareForVerifyClosure blk;
    generation_iterate(&blk, false);
  }
  
  void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                            bool old_to_young) {
    if (old_to_young) {
!     cl->do_generation(_old_gen);
!     cl->do_generation(_young_gen);
    } else {
!     cl->do_generation(_young_gen);
!     cl->do_generation(_old_gen);
    }
  }
  
  void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
!   _young_gen->space_iterate(cl, true);
!   _old_gen->space_iterate(cl, true);
  }
  
  bool GenCollectedHeap::is_maximal_no_gc() const {
!   return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
  }
  
  void GenCollectedHeap::save_marks() {
!   _young_gen->save_marks();
!   _old_gen->save_marks();
  }
  
  GenCollectedHeap* GenCollectedHeap::heap() {
    assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
    assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
*** 1079,1119 ****
    return _gch;
  }
  
  void GenCollectedHeap::prepare_for_compaction() {
    guarantee(_n_gens == 2, "Wrong number of generations");
!   Generation* old_gen = _gens[1];
    // Start by compacting into same gen.
    CompactPoint cp(old_gen);
    old_gen->prepare_for_compaction(&cp);
!   Generation* young_gen = _gens[0];
    young_gen->prepare_for_compaction(&cp);
  }
  
  GCStats* GenCollectedHeap::gc_stats(int level) const {
!   return _gens[level]->gc_stats();
  }
  
  void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
-   for (int i = _n_gens-1; i >= 0; i--) {
-     Generation* g = _gens[i];
      if (!silent) {
!       gclog_or_tty->print("%s", g->name());
        gclog_or_tty->print(" ");
      }
!     g->verify();
    }
    if (!silent) {
      gclog_or_tty->print("remset ");
    }
    rem_set()->verify();
  }
  
  void GenCollectedHeap::print_on(outputStream* st) const {
!   for (int i = 0; i < _n_gens; i++) {
!     _gens[i]->print_on(st);
!   }
    MetaspaceAux::print_on(st);
  }
  
  void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
    if (workers() != NULL) {
--- 1039,1086 ----
    return _gch;
  }
  
  void GenCollectedHeap::prepare_for_compaction() {
    guarantee(_n_gens == 2, "Wrong number of generations");
!   Generation* old_gen = _old_gen;
    // Start by compacting into same gen.
    CompactPoint cp(old_gen);
    old_gen->prepare_for_compaction(&cp);
!   Generation* young_gen = _young_gen;
    young_gen->prepare_for_compaction(&cp);
  }
  
  GCStats* GenCollectedHeap::gc_stats(int level) const {
!   if (level == 0) {
!     return _young_gen->gc_stats();
!   } else {
!     return _old_gen->gc_stats();
!   }
  }
  
  void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
    if (!silent) {
!     gclog_or_tty->print("%s", _old_gen->name());
!     gclog_or_tty->print(" ");
    }
!   _old_gen->verify();
!
!   if (!silent) {
!     gclog_or_tty->print("%s", _young_gen->name());
!     gclog_or_tty->print(" ");
    }
+   _young_gen->verify();
+
    if (!silent) {
      gclog_or_tty->print("remset ");
    }
    rem_set()->verify();
  }
  
  void GenCollectedHeap::print_on(outputStream* st) const {
!   _young_gen->print_on(st);
!   _old_gen->print_on(st);
    MetaspaceAux::print_on(st);
  }
  
  void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
    if (workers() != NULL) {