src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

Print this page
rev 4773 : 8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>

*** 1,7 **** /* ! * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. --- 1,7 ---- /* ! * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation.
*** 27,36 **** --- 27,41 ---- #include "gc_implementation/parNew/parNewGeneration.hpp" #include "gc_implementation/parNew/parOopClosures.inline.hpp" #include "gc_implementation/shared/adaptiveSizePolicy.hpp" #include "gc_implementation/shared/ageTable.hpp" #include "gc_implementation/shared/parGCAllocBuffer.hpp" + #include "gc_implementation/shared/gcHeapSummary.hpp" + #include "gc_implementation/shared/gcTimer.hpp" + #include "gc_implementation/shared/gcTrace.hpp" + #include "gc_implementation/shared/gcTraceTime.hpp" + #include "gc_implementation/shared/copyFailedInfo.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" #include "memory/defNewGeneration.inline.hpp" #include "memory/genCollectedHeap.hpp" #include "memory/genOopClosures.inline.hpp" #include "memory/generation.hpp"
*** 73,83 **** _evacuate_followers(this, &_to_space_closure, &_old_gen_closure, &_to_space_root_closure, gen_, &_old_gen_root_closure, work_queue_set_, &term_), _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this), _keep_alive_closure(&_scan_weak_ref_closure), - _promotion_failure_size(0), _strong_roots_time(0.0), _term_time(0.0) { #if TASKQUEUE_STATS _term_attempts = 0; _overflow_refills = 0; --- 78,87 ----
*** 277,293 **** } else { CollectedHeap::fill_with_object(obj, word_sz); } } ! void ParScanThreadState::print_and_clear_promotion_failure_size() { ! if (_promotion_failure_size != 0) { ! if (PrintPromotionFailure) { gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ", ! _thread_num, _promotion_failure_size); ! } ! _promotion_failure_size = 0; } } class ParScanThreadStateSet: private ResourceArray { public: --- 281,294 ---- } else { CollectedHeap::fill_with_object(obj, word_sz); } } ! void ParScanThreadState::print_promotion_failure_size() { ! if (_promotion_failed_info.has_failed() && PrintPromotionFailure) { gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ", ! _thread_num, _promotion_failed_info.first_size()); } } class ParScanThreadStateSet: private ResourceArray { public:
*** 303,312 **** --- 304,314 ---- ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); } inline ParScanThreadState& thread_state(int i); + void trace_promotion_failed(YoungGCTracer& gc_tracer); void reset(int active_workers, bool promotion_failed); void flush(); #if TASKQUEUE_STATS static void
*** 351,367 **** { assert(i >= 0 && i < length(), "sanity check!"); return ((ParScanThreadState*)_data)[i]; } void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed) { _term.reset_for_reuse(active_threads); if (promotion_failed) { for (int i = 0; i < length(); ++i) { ! thread_state(i).print_and_clear_promotion_failure_size(); } } } #if TASKQUEUE_STATS --- 353,377 ---- { assert(i >= 0 && i < length(), "sanity check!"); return ((ParScanThreadState*)_data)[i]; } + void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) { + for (int i = 0; i < length(); ++i) { + if (thread_state(i).promotion_failed()) { + gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info()); + thread_state(i).promotion_failed_info().reset(); + } + } + } void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed) { _term.reset_for_reuse(active_threads); if (promotion_failed) { for (int i = 0; i < length(); ++i) { ! thread_state(i).print_promotion_failure_size(); } } } #if TASKQUEUE_STATS
*** 581,598 **** // grab it instead. GenCollectedHeap* gch = GenCollectedHeap::heap(); gch->set_n_termination(active_workers); } - // The "i" passed to this method is the part of the work for - // this thread. It is not the worker ID. The "i" is derived - // from _started_workers which is incremented in internal_note_start() - // called in GangWorker loop(), which is called under the - // protection of the gang monitor and is - // called after a task is started. So "i" is based on - // first-come-first-served. - void ParNewGenTask::work(uint worker_id) { GenCollectedHeap* gch = GenCollectedHeap::heap(); // Since this is being done in a separate thread, need new resource // and handle marks. ResourceMark rm; --- 591,600 ----
*** 874,893 **** _scan_older); } while (!_gch->no_allocs_since_save_marks(_level)); } bool ParNewGeneration::_avoid_promotion_undo = false; ! // A Generation that does parallel young-gen collection. void ParNewGeneration::collect(bool full, bool clear_all_soft_refs, size_t size, bool is_tlab) { assert(full || size > 0, "otherwise we don't want to collect"); GenCollectedHeap* gch = GenCollectedHeap::heap(); assert(gch->kind() == CollectedHeap::GenCollectedHeap, "not a CMS generational heap"); AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy(); FlexibleWorkGang* workers = gch->workers(); assert(workers != NULL, "Need workgang for parallel work"); --- 876,924 ---- _scan_older); } while (!_gch->no_allocs_since_save_marks(_level)); } + // A Generation that does parallel young-gen collection. + bool ParNewGeneration::_avoid_promotion_undo = false; ! void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) { ! assert(_promo_failure_scan_stack.is_empty(), "post condition"); ! _promo_failure_scan_stack.clear(true); // Clear cached segments. ! ! remove_forwarding_pointers(); ! if (PrintGCDetails) { ! gclog_or_tty->print(" (promotion failed)"); ! } ! // All the spaces are in play for mark-sweep. ! swap_spaces(); // Make life simpler for CMS || rescan; see 6483690. ! from()->set_next_compaction_space(to()); ! gch->set_incremental_collection_failed(); ! // Inform the next generation that a promotion failure occurred. ! _next_gen->promotion_failure_occurred(); ! ! // Trace promotion failure in the parallel GC threads ! thread_state_set.trace_promotion_failed(gc_tracer); ! // Single threaded code may have reported promotion failure to the global state ! if (_promotion_failed_info.has_failed()) { ! gc_tracer.report_promotion_failed(_promotion_failed_info); ! } ! // Reset the PromotionFailureALot counters. ! NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();) ! 
} void ParNewGeneration::collect(bool full, bool clear_all_soft_refs, size_t size, bool is_tlab) { assert(full || size > 0, "otherwise we don't want to collect"); + GenCollectedHeap* gch = GenCollectedHeap::heap(); + + _gc_timer->register_gc_start(os::elapsed_counter()); + assert(gch->kind() == CollectedHeap::GenCollectedHeap, "not a CMS generational heap"); AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy(); FlexibleWorkGang* workers = gch->workers(); assert(workers != NULL, "Need workgang for parallel work");
*** 904,930 **** // Do we have to avoid promotion_undo? if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) { set_avoid_promotion_undo(true); } ! // If the next generation is too full to accomodate worst-case promotion // from this generation, pass on collection; let the next generation // do it. if (!collection_attempt_is_safe()) { gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one return; } assert(to()->is_empty(), "Else not collection_attempt_is_safe"); init_assuming_no_promotion_failure(); if (UseAdaptiveSizePolicy) { set_survivor_overflow(false); size_policy->minor_collection_begin(); } ! TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty); // Capture heap used before collection (for printing). size_t gch_prev_used = gch->used(); SpecializationStats::clear(); --- 935,965 ---- // Do we have to avoid promotion_undo? if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) { set_avoid_promotion_undo(true); } ! // If the next generation is too full to accommodate worst-case promotion // from this generation, pass on collection; let the next generation // do it. if (!collection_attempt_is_safe()) { gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one return; } assert(to()->is_empty(), "Else not collection_attempt_is_safe"); + ParNewTracer gc_tracer; + gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start()); + gch->trace_heap_before_gc(&gc_tracer); + init_assuming_no_promotion_failure(); if (UseAdaptiveSizePolicy) { set_survivor_overflow(false); size_policy->minor_collection_begin(); } ! GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); // Capture heap used before collection (for printing). size_t gch_prev_used = gch->used(); SpecializationStats::clear();
*** 973,993 **** EvacuateFollowersClosureGeneral evacuate_followers(gch, _level, &scan_without_gc_barrier, &scan_with_gc_barrier); rp->setup_policy(clear_all_soft_refs); // Can the mt_degree be set later (at run_task() time would be best)? rp->set_active_mt_degree(active_workers); if (rp->processing_is_mt()) { ParNewRefProcTaskExecutor task_executor(*this, thread_state_set); ! rp->process_discovered_references(&is_alive, &keep_alive, ! &evacuate_followers, &task_executor); } else { thread_state_set.flush(); gch->set_par_threads(0); // 0 ==> non-parallel. gch->save_marks(); ! rp->process_discovered_references(&is_alive, &keep_alive, ! &evacuate_followers, NULL); } if (!promotion_failed()) { // Swap the survivor spaces. eden()->clear(SpaceDecorator::Mangle); from()->clear(SpaceDecorator::Mangle); if (ZapUnusedHeapArea) { --- 1008,1032 ---- EvacuateFollowersClosureGeneral evacuate_followers(gch, _level, &scan_without_gc_barrier, &scan_with_gc_barrier); rp->setup_policy(clear_all_soft_refs); // Can the mt_degree be set later (at run_task() time would be best)? rp->set_active_mt_degree(active_workers); + ReferenceProcessorStats stats; if (rp->processing_is_mt()) { ParNewRefProcTaskExecutor task_executor(*this, thread_state_set); ! stats = rp->process_discovered_references(&is_alive, &keep_alive, ! &evacuate_followers, &task_executor, ! _gc_timer); } else { thread_state_set.flush(); gch->set_par_threads(0); // 0 ==> non-parallel. gch->save_marks(); ! stats = rp->process_discovered_references(&is_alive, &keep_alive, ! &evacuate_followers, NULL, ! _gc_timer); } + gc_tracer.report_gc_reference_stats(stats); if (!promotion_failed()) { // Swap the survivor spaces. eden()->clear(SpaceDecorator::Mangle); from()->clear(SpaceDecorator::Mangle); if (ZapUnusedHeapArea) {
*** 1008,1033 **** assert(to()->is_empty(), "to space should be empty now"); adjust_desired_tenuring_threshold(); } else { ! assert(_promo_failure_scan_stack.is_empty(), "post condition"); ! _promo_failure_scan_stack.clear(true); // Clear cached segments. ! ! remove_forwarding_pointers(); ! if (PrintGCDetails) { ! gclog_or_tty->print(" (promotion failed)"); ! } ! // All the spaces are in play for mark-sweep. ! swap_spaces(); // Make life simpler for CMS || rescan; see 6483690. ! from()->set_next_compaction_space(to()); ! gch->set_incremental_collection_failed(); ! // Inform the next generation that a promotion failure occurred. ! _next_gen->promotion_failure_occurred(); ! ! // Reset the PromotionFailureALot counters. ! NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();) } // set new iteration safe limit for the survivor spaces from()->set_concurrent_iteration_safe_limit(from()->top()); to()->set_concurrent_iteration_safe_limit(to()->top()); --- 1047,1057 ---- assert(to()->is_empty(), "to space should be empty now"); adjust_desired_tenuring_threshold(); } else { ! handle_promotion_failed(gch, thread_state_set, gc_tracer); } // set new iteration safe limit for the survivor spaces from()->set_concurrent_iteration_safe_limit(from()->top()); to()->set_concurrent_iteration_safe_limit(to()->top());
*** 1063,1072 **** --- 1087,1103 ---- rp->enqueue_discovered_references(&task_executor); } else { rp->enqueue_discovered_references(NULL); } rp->verify_no_references_recorded(); + + gch->trace_heap_after_gc(&gc_tracer); + gc_tracer.report_tenuring_threshold(tenuring_threshold()); + + _gc_timer->register_gc_end(os::elapsed_counter()); + + gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions()); } static int sum; void ParNewGeneration::waste_some_time() { for (int i = 0; i < 100; i++) {
*** 1172,1183 **** // promotion failed, forward to self _promotion_failed = true; new_obj = old; preserve_mark_if_necessary(old, m); ! // Log the size of the maiden promotion failure ! par_scan_state->log_promotion_failure(sz); } old->forward_to(new_obj); forward_ptr = NULL; } else { --- 1203,1213 ---- // promotion failed, forward to self _promotion_failed = true; new_obj = old; preserve_mark_if_necessary(old, m); ! par_scan_state->register_promotion_failure(sz); } old->forward_to(new_obj); forward_ptr = NULL; } else {
*** 1298,1309 **** _promotion_failed = true; failed_to_promote = true; preserve_mark_if_necessary(old, m); ! // Log the size of the maiden promotion failure ! par_scan_state->log_promotion_failure(sz); } } else { // Is in to-space; do copying ourselves. Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz); // Restore the mark word copied above. --- 1328,1338 ---- _promotion_failed = true; failed_to_promote = true; preserve_mark_if_necessary(old, m); ! par_scan_state->register_promotion_failure(sz); } } else { // Is in to-space; do copying ourselves. Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz); // Restore the mark word copied above.
*** 1597,1608 **** #endif return true; } #undef BUSY ! void ParNewGeneration::ref_processor_init() ! { if (_ref_processor == NULL) { // Allocate and initialize a reference processor _ref_processor = new ReferenceProcessor(_reserved, // span ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing --- 1626,1636 ---- #endif return true; } #undef BUSY ! void ParNewGeneration::ref_processor_init() { if (_ref_processor == NULL) { // Allocate and initialize a reference processor _ref_processor = new ReferenceProcessor(_reserved, // span ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing