--- old/src/share/vm/memory/genCollectedHeap.cpp	2015-01-28 18:28:52.000000000 +0100
+++ new/src/share/vm/memory/genCollectedHeap.cpp	2015-01-28 18:28:52.000000000 +0100
@@ -320,7 +320,8 @@
 }
 
 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
-                                          bool is_tlab, bool run_verification, bool clear_soft_refs) {
+                                          bool is_tlab, bool run_verification, bool clear_soft_refs,
+                                          bool restore_marks_for_biased_locking) {
   // Timer for individual generations. Last argument is false: no CR
   // FIXME: We should try to start the timing earlier to cover more of the GC pause
   // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
@@ -351,6 +352,14 @@
   }
   COMPILER2_PRESENT(DerivedPointerTable::clear());
 
+  if (restore_marks_for_biased_locking) {
+    // We perform this mark word preservation work lazily
+    // because it's only at this point that we know whether we
+    // absolutely have to do it; we want to avoid doing it for
+    // scavenge-only collections where it's unnecessary
+    BiasedLocking::preserve_marks();
+  }
+
   // Do collection work
   {
     // Note on ref discovery: For what appear to be historical reasons,
@@ -389,15 +398,6 @@
     rp->verify_no_references_recorded();
   }
 
-  // Determine if allocation request was met.
-  if (size > 0) {
-    if (!is_tlab || gen->supports_tlab_allocation()) {
-      if (size * HeapWordSize <= gen->unsafe_max_alloc_nogc()) {
-        size = 0;
-      }
-    }
-  }
-
   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 
   gen->stat_record()->accumulated_time.stop();
@@ -462,36 +462,52 @@
   bool must_restore_marks_for_biased_locking = false;
 
   bool run_verification = total_collections() >= VerifyGCStartAt;
-  if (_young_gen->performs_in_place_marking() ||
-      _old_gen->performs_in_place_marking()) {
-    // We want to avoid doing this for
-    // scavenge-only collections where it's unnecessary.
-    must_restore_marks_for_biased_locking = true;
-    BiasedLocking::preserve_marks();
-  }
-
   bool prepared_for_verification = false;
   int max_level_collected = 0;
-  if (!(full && _old_gen->full_collects_younger_generations()) &&
+  if (!(max_level == 1 && full && _old_gen->full_collects_younger_generations()) &&
       _young_gen->should_collect(full, size, is_tlab)) {
     if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
       prepare_for_verify();
       prepared_for_verification = true;
     }
-    collect_generation(_young_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 0, do_clear_all_soft_refs);
+    if (_young_gen->performs_in_place_marking()) {
+      must_restore_marks_for_biased_locking = true;
+    }
+    collect_generation(_young_gen,
+                       full,
+                       size,
+                       is_tlab,
+                       run_verification && VerifyGCLevel <= 0,
+                       do_clear_all_soft_refs,
+                       must_restore_marks_for_biased_locking);
+
+    if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
+        size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
+      // Allocation request was met by young GC.
+      size = 0;
+    }
   }
+
   if (max_level == 1 && _old_gen->should_collect(full, size, is_tlab)) {
     if (!complete) {
       // The full_collections increment was missed above.
       increment_total_full_collections();
     }
     pre_full_gc_dump(NULL);    // do any pre full gc dumps
-    if (run_verification && VerifyGCLevel <= 1 && VerifyBeforeGC) {
-      if (!prepared_for_verification) {
-        prepare_for_verify();
-      }
+    if (!prepared_for_verification && run_verification &&
+        VerifyGCLevel <= 1 && VerifyBeforeGC) {
+      prepare_for_verify();
     }
-    collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs);
+    assert(_old_gen->performs_in_place_marking(), "All old generations do in place marking");
+    collect_generation(_old_gen,
+                       full,
+                       size,
+                       is_tlab,
+                       run_verification && VerifyGCLevel <= 1,
+                       do_clear_all_soft_refs,
+                       !must_restore_marks_for_biased_locking);
+
+    must_restore_marks_for_biased_locking = true;
     max_level_collected = 1;
   }
 
@@ -649,11 +665,10 @@
 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
 
 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
-  if (level == 0) {
-    if (!_young_gen->no_allocs_since_save_marks()) return false;
+  if (level == 0 && !_young_gen->no_allocs_since_save_marks()) {
+    return false;
   }
-  if (!_old_gen->no_allocs_since_save_marks()) return false;
-  return true;
+  return _old_gen->no_allocs_since_save_marks();
 }
 
 bool GenCollectedHeap::supports_inline_contig_alloc() const {
--- old/src/share/vm/memory/genCollectedHeap.hpp	2015-01-28 18:28:53.000000000 +0100
+++ new/src/share/vm/memory/genCollectedHeap.hpp	2015-01-28 18:28:53.000000000 +0100
@@ -88,8 +88,10 @@
   SubTasksDone* _gen_process_roots_tasks;
   SubTasksDone* gen_process_roots_tasks() { return _gen_process_roots_tasks; }
 
+  // Collects the given generation.
   void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
-                          bool run_verification, bool clear_soft_refs);
+                          bool run_verification, bool clear_soft_refs,
+                          bool restore_marks_for_biased_locking);
 
   // In block contents verification, the number of header words to skip
   NOT_PRODUCT(static size_t _skip_header_HeapWords;)
--- old/src/share/vm/runtime/vmStructs.cpp	2015-01-28 18:28:54.000000000 +0100
+++ new/src/share/vm/runtime/vmStructs.cpp	2015-01-28 18:28:54.000000000 +0100
@@ -550,6 +550,8 @@
   nonstatic_field(GenerationSpec,              _max_size,        size_t)             \
                                                                                      \
      static_field(GenCollectedHeap,            _gch,             GenCollectedHeap*)  \
+ nonstatic_field(GenCollectedHeap,             _young_gen,       Generation*)        \
+ nonstatic_field(GenCollectedHeap,             _old_gen,         Generation*)        \
  nonstatic_field(GenCollectedHeap,             _n_gens,          int)                \
  nonstatic_field(GenCollectedHeap,             _gen_specs,       GenerationSpec**)   \
                                                                                      \
--- old/test/gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java	2015-01-28 18:28:55.000000000 +0100
+++ new/test/gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java	2015-01-28 18:28:55.000000000 +0100
@@ -36,6 +36,7 @@
 import java.util.List;
 
 public class CompressedClassSpaceSizeInJmapHeap {
+    // Note that on some platforms it may require root privileges to run this test.
    public static void main(String[] args) throws Exception {
        if (!Platform.is64bit()) {
            // Compressed Class Space is only available on 64-bit JVMs
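
Reviewer note: below is a minimal, self-contained sketch of the lazy mark-preservation
pattern the patch introduces, assuming simplified stand-in types. Generation,
collect_generation, preserve_marks, and restore_marks here are hypothetical placeholders
that only mirror the shape of the HotSpot code; nothing in this sketch is part of the
patch itself.

// Standalone sketch (not part of the patch): models how the patched
// do_collection() defers BiasedLocking::preserve_marks() until a generation
// that marks in place is actually collected, and preserves at most once per GC.
#include <cstdio>

struct Generation {
  bool marks_in_place;
  bool performs_in_place_marking() const { return marks_in_place; }
};

// Stand-ins for BiasedLocking::preserve_marks() / restore_marks().
static void preserve_marks() { std::puts("preserve_marks"); }
static void restore_marks()  { std::puts("restore_marks"); }

// Mirrors the new parameter: preservation happens lazily, only when the
// caller says this particular collection needs it.
static void collect_generation(Generation* gen, bool restore_marks_for_biased_locking) {
  (void)gen;  // collection work elided in this sketch
  if (restore_marks_for_biased_locking) {
    preserve_marks();
  }
}

int main() {
  Generation young   = { false };  // scavenge-only young gen: no in-place marking
  Generation old_gen = { true };   // old gens always mark in place (see the assert)

  bool must_restore_marks_for_biased_locking = false;

  // Young collection: preserve marks only if the young gen marks in place.
  if (young.performs_in_place_marking()) {
    must_restore_marks_for_biased_locking = true;
  }
  collect_generation(&young, must_restore_marks_for_biased_locking);

  // Old collection: preserve only if the young collection didn't already.
  collect_generation(&old_gen, !must_restore_marks_for_biased_locking);
  must_restore_marks_for_biased_locking = true;

  // After all collection work, restore the preserved mark words.
  if (must_restore_marks_for_biased_locking) {
    restore_marks();
  }
  return 0;
}

The net effect, as the added comment in the patch explains, is that a scavenge-only
cycle never pays for preserve/restore, while at most one preserve_marks() call happens
when any collected generation does in-place marking, replacing the eager call that the
patch removes from the top of do_collection().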