--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	2013-10-25 15:28:52.000000000 +0200
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	2013-10-25 15:28:52.000000000 +0200
@@ -2186,10 +2186,6 @@
   return JNI_OK;
 }
 
-size_t G1CollectedHeap::conservative_max_heap_alignment() {
-  return HeapRegion::max_region_size();
-}
-
 void G1CollectedHeap::ref_processing_init() {
   // Reference processing in G1 currently works as follows:
   //
--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	2013-10-25 15:28:53.000000000 +0200
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	2013-10-25 15:28:53.000000000 +0200
@@ -1092,7 +1092,9 @@
   jint initialize();
 
   // Return the (conservative) maximum heap alignment for any G1 heap
-  static size_t conservative_max_heap_alignment();
+  static size_t conservative_max_heap_alignment() {
+    return HeapRegion::max_region_size();
+  }
 
   // Initialize weak reference processing.
   virtual void ref_processing_init();
--- old/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	2013-10-25 15:28:54.000000000 +0200
+++ new/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	2013-10-25 15:28:54.000000000 +0200
@@ -320,7 +320,7 @@
 
 void G1CollectorPolicy::initialize_flags() {
   _min_alignment = HeapRegion::GrainBytes;
-  size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
+  size_t card_table_alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
   _max_alignment = MAX3(card_table_alignment, _min_alignment, page_size);
   if (SurvivorRatio < 1) {
--- old/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	2013-10-25 15:28:54.000000000 +0200
+++ new/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	2013-10-25 15:28:54.000000000 +0200
@@ -217,7 +217,6 @@
     return _during_marking;
   }
 
-private:
   enum PredictionConstants {
     TruncatedSeqLength = 10
   };
@@ -665,8 +664,6 @@
 
   BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
 
-  GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
-
   bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
 
   // Record the start and end of an evacuation pause.
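Note on the G1 hunks above: conservative_max_heap_alignment() collapses to a header-inlined return of HeapRegion::max_region_size(), and initialize_flags() now names GenRemSet::CardTable directly instead of routing through the now-deleted virtual rem_set_name(). A minimal standalone sketch of the resulting _max_alignment computation, with simplified stand-ins for MAX3 and its three inputs (all names below are illustrative, not the patch's code):

    #include <algorithm>
    #include <cstddef>

    // Stand-in for the MAX3 macro used in G1CollectorPolicy::initialize_flags().
    static size_t max3(size_t a, size_t b, size_t c) {
      return std::max(a, std::max(b, c));
    }

    // The max alignment is the largest of: the card table's alignment
    // constraint (asked for GenRemSet::CardTable directly after this patch),
    // the region size (HeapRegion::GrainBytes), and the OS page size.
    size_t g1_max_alignment(size_t card_table_alignment,
                            size_t grain_bytes,
                            size_t page_size) {
      return max3(card_table_alignment, grain_bytes, page_size);
    }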
--- old/src/share/vm/gc_implementation/g1/g1RemSet.cpp	2013-10-25 15:28:55.000000000 +0200
+++ new/src/share/vm/gc_implementation/g1/g1RemSet.cpp	2013-10-25 15:28:55.000000000 +0200
@@ -377,11 +377,6 @@
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   dcqs.concatenate_logs();
 
-  if (G1CollectedHeap::use_parallel_gc_threads()) {
-    // Don't set the number of workers here.  It will be set
-    // when the task is run
-    // _seq_task->set_n_termination((int)n_workers());
-  }
   guarantee( _cards_scanned == NULL, "invariant" );
   _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers(), mtGC);
   for (uint i = 0; i < n_workers(); ++i) {
--- old/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp	2013-10-25 15:28:55.000000000 +0200
+++ new/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp	2013-10-25 15:28:55.000000000 +0200
@@ -54,7 +54,6 @@
                        int level) :
   PSOldGen(initial_size, min_size, size_limit, gen_name, level),
   _gen_size_limit(size_limit)
-
 {}
 
 ASPSOldGen::ASPSOldGen(PSVirtualSpace* vs,
@@ -65,13 +64,11 @@
                        int level) :
   PSOldGen(initial_size, min_size, size_limit, gen_name, level),
   _gen_size_limit(size_limit)
-
 {
   _virtual_space = vs;
 }
 
 void ASPSOldGen::initialize_work(const char* perf_data_name, int level) {
-
   PSOldGen::initialize_work(perf_data_name, level);
 
   // The old gen can grow to gen_size_limit().  _reserve reflects only
--- old/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp	2013-10-25 15:28:56.000000000 +0200
+++ new/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp	2013-10-25 15:28:56.000000000 +0200
@@ -70,7 +70,6 @@
 }
 
 size_t ASPSYoungGen::available_for_expansion() {
-
   size_t current_committed_size = virtual_space()->committed_size();
   assert((gen_size_limit() >= current_committed_size),
     "generation size limit is wrong");
@@ -85,7 +84,6 @@
 // Future implementations could check the survivors and if to_space is in the
 // right place (below from_space), take a chunk from to_space.
 size_t ASPSYoungGen::available_for_contraction() {
-
   size_t uncommitted_bytes = virtual_space()->uncommitted_size();
   if (uncommitted_bytes != 0) {
     return uncommitted_bytes;
@@ -121,7 +119,6 @@
       gclog_or_tty->print_cr("  gen_avail %d K", gen_avail/K);
     }
 
     return result_aligned;
-
   }
   return 0;
--- old/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	2013-10-25 15:28:57.000000000 +0200
+++ new/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	2013-10-25 15:28:56.000000000 +0200
@@ -35,7 +35,6 @@
 #include "utilities/ostream.hpp"
 
 class AdjoiningGenerations;
-class CollectorPolicy;
 class GCHeapSummary;
 class GCTaskManager;
 class GenerationSizer;
@@ -50,8 +49,8 @@
   static PSOldGen* _old_gen;
 
   // Sizing policy for entire heap
-  static PSAdaptiveSizePolicy* _size_policy;
-  static PSGCAdaptivePolicyCounters* _gc_policy_counters;
+  static PSAdaptiveSizePolicy*       _size_policy;
+  static PSGCAdaptivePolicyCounters* _gc_policy_counters;
 
   static ParallelScavengeHeap* _psh;
 
@@ -67,7 +66,8 @@
   AdjoiningGenerations* _gens;
   unsigned int _death_march_count;
 
-  static GCTaskManager* _gc_task_manager;  // The task manager.
+  // The task manager
+  static GCTaskManager* _gc_task_manager;
 
   void trace_heap(GCWhen::Type when, GCTracer* tracer);
 
@@ -80,15 +80,14 @@
   HeapWord* mem_allocate_old_gen(size_t size);
 
  public:
-  ParallelScavengeHeap() : CollectedHeap() {
-    _death_march_count = 0;
+  ParallelScavengeHeap() : CollectedHeap(), _death_march_count(0) {
     set_alignment(_young_gen_alignment, intra_heap_alignment());
     set_alignment(_old_gen_alignment, intra_heap_alignment());
   }
 
   // Return the (conservative) maximum heap alignment
   static size_t conservative_max_heap_alignment() {
-    return intra_heap_alignment();
+    return GenCollectorPolicy::intra_heap_alignment();
   }
 
   // For use by VM operations
@@ -103,8 +102,8 @@
 
   virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }
 
-  static PSYoungGen* young_gen() { return _young_gen; }
-  static PSOldGen* old_gen() { return _old_gen; }
+  static PSYoungGen* young_gen() { return _young_gen; }
+  static PSOldGen*   old_gen()   { return _old_gen; }
 
   virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }
 
@@ -127,7 +126,7 @@
 
   // The alignment used for eden and survivors within the young gen
   // and for boundary between young gen and old gen.
-  static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; }
+  size_t intra_heap_alignment() { return GenCollectorPolicy::intra_heap_alignment(); }
 
   size_t capacity() const;
   size_t used() const;
@@ -157,16 +156,15 @@
   virtual bool is_in_partial_collection(const void *p);
 #endif
 
-  bool is_in_young(oop p);  // reserved part
-  bool is_in_old(oop p);    // reserved part
+  bool is_in_young(oop p); // reserved part
+  bool is_in_old(oop p);   // reserved part
 
   // Memory allocation.   "gc_time_limit_was_exceeded" will
   // be set to true if the adaptive size policy determine that
   // an excessive amount of time is being spent doing collections
   // and caused a NULL to be returned.  If a NULL is not returned,
   // "gc_time_limit_was_exceeded" has an undefined meaning.
-  HeapWord* mem_allocate(size_t size,
-                         bool* gc_overhead_limit_was_exceeded);
+  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
 
   // Allocation attempt(s) during a safepoint. It should never be called
   // to allocate a new TLAB as this allocation might be satisfied out
@@ -257,7 +255,7 @@
 
   // Call these in sequential code around the processing of strong roots.
   class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
-  public:
+   public:
     ParStrongRootsScope();
     ~ParStrongRootsScope();
   };
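The net effect of the ParallelScavengeHeap hunks: the 64 * K * HeapWordSize constant no longer lives in the heap class; both intra_heap_alignment() and conservative_max_heap_alignment() forward to GenCollectorPolicy (see the collectorPolicy.hpp hunks below). A hedged standalone sketch of the new shape, assuming an LP64 build where HeapWordSize == 8 (simplified stand-in types, not the patch's code):

    #include <cassert>
    #include <cstddef>

    static const size_t K = 1024;
    static const size_t HeapWordSize = 8;  // assumption: 64-bit VM

    struct GenCollectorPolicy {  // stand-in for the real policy class
      static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; }
    };

    struct ParallelScavengeHeap {  // stand-in: forwards instead of owning the constant
      size_t intra_heap_alignment() const {
        return GenCollectorPolicy::intra_heap_alignment();
      }
      static size_t conservative_max_heap_alignment() {
        return GenCollectorPolicy::intra_heap_alignment();
      }
    };

    int main() {
      // 64 * 1024 * 8 = 512K generation-boundary alignment on LP64.
      assert(ParallelScavengeHeap::conservative_max_heap_alignment() == 512 * K);
      return 0;
    }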
--- old/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	2013-10-25 15:28:57.000000000 +0200
+++ new/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	2013-10-25 15:28:57.000000000 +0200
@@ -46,8 +46,7 @@
                    init_survivor_size,
                    gc_pause_goal_sec,
                    gc_cost_ratio),
-     _collection_cost_margin_fraction(AdaptiveSizePolicyCollectionCostMargin/
-       100.0),
+     _collection_cost_margin_fraction(AdaptiveSizePolicyCollectionCostMargin / 100.0),
      _intra_generation_alignment(intra_generation_alignment),
      _live_at_last_full_gc(init_promo_size),
      _gc_minor_pause_goal_sec(gc_minor_pause_goal_sec),
--- old/src/share/vm/memory/collectorPolicy.cpp	2013-10-25 15:28:58.000000000 +0200
+++ new/src/share/vm/memory/collectorPolicy.cpp	2013-10-25 15:28:58.000000000 +0200
@@ -105,7 +105,6 @@
 
 GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                            int max_covered_regions) {
-  assert(rem_set_name() == GenRemSet::CardTable, "unrecognized GenRemSet::Name");
   return new CardTableRS(whole_heap, max_covered_regions);
 }
 
@@ -147,11 +146,7 @@
 // GenCollectorPolicy methods.
 
 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
-  size_t x = base_size / (NewRatio+1);
-  size_t new_gen_size = x > _min_alignment ?
-                     align_size_down(x, _min_alignment) :
-                     _min_alignment;
-  return new_gen_size;
+  return align_size_down_bounded(base_size / (NewRatio + 1), _min_alignment);
 }
 
 size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
@@ -165,7 +160,7 @@
 void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                 size_t init_promo_size,
                                                 size_t init_survivor_size) {
-  const double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
+  const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
   _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                         init_promo_size,
                                         init_survivor_size,
@@ -197,6 +192,7 @@
     // make sure there room for eden and two survivor spaces
     vm_exit_during_initialization("Too small new size specified");
   }
+
   if (SurvivorRatio < 1 || NewRatio < 1) {
     vm_exit_during_initialization("Invalid young gen ratio specified");
   }
@@ -411,15 +407,11 @@
     if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
         (heap_size >= min_gen1_size + _min_alignment)) {
       // Adjust gen0 down to accommodate min_gen1_size
-      *gen0_size_ptr = heap_size - min_gen1_size;
-      *gen0_size_ptr =
-        MAX2((uintx)align_size_down(*gen0_size_ptr, _min_alignment), _min_alignment);
+      *gen0_size_ptr = align_size_down_bounded(heap_size - min_gen1_size, _min_alignment);
       assert(*gen0_size_ptr > 0, "Min gen0 is too large");
       result = true;
     } else {
-      *gen1_size_ptr = heap_size - *gen0_size_ptr;
-      *gen1_size_ptr =
-        MAX2((uintx)align_size_down(*gen1_size_ptr, _min_alignment), _min_alignment);
+      *gen1_size_ptr = align_size_down_bounded(heap_size - *gen0_size_ptr, _min_alignment);
     }
   }
   return result;
@@ -474,7 +466,7 @@
             "generation sizes: using minimum heap = " SIZE_FORMAT,
             _min_heap_byte_size);
   }
-  if ((OldSize > _max_gen1_size)) {
+  if (OldSize > _max_gen1_size) {
     warning("Inconsistency between maximum heap size and maximum "
             "generation sizes: using maximum heap = " SIZE_FORMAT
             " -XX:OldSize flag is being ignored",
@@ -605,9 +597,7 @@
       gc_count_before = Universe::heap()->total_collections();
     }
 
-    VM_GenCollectForAllocation op(size,
-                                  is_tlab,
-                                  gc_count_before);
+    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
     VMThread::execute(&op);
     if (op.prologue_succeeded()) {
       result = op.result();
@@ -842,8 +832,9 @@
 
 void MarkSweepPolicy::initialize_generations() {
   _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
-  if (_generations == NULL)
+  if (_generations == NULL) {
    vm_exit_during_initialization("Unable to allocate gen spec");
+  }
 
   if (UseParNewGC) {
     _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
@@ -852,8 +843,9 @@
   }
 
   _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);
-  if (_generations[0] == NULL || _generations[1] == NULL)
+  if (_generations[0] == NULL || _generations[1] == NULL) {
     vm_exit_during_initialization("Unable to allocate gen spec");
+  }
 }
 
 void MarkSweepPolicy::initialize_gc_policy_counters() {
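The scale_by_NewRatio_aligned() and adjust_gen0_sizes() rewrites above rely on the new align_size_down_bounded() helper (added to globalDefinitions.hpp at the end of this patch) being equivalent to the open-coded MAX2/ternary clamping it replaces. A small self-contained check of that equivalence, with a local re-implementation of the align-down macro for power-of-two alignments (hypothetical values; not the patch's code):

    #include <cassert>
    #include <cstddef>

    // Local re-implementation of align_size_down_ for a power-of-two alignment.
    static size_t align_size_down(size_t size, size_t alignment) {
      return size & ~(alignment - 1);
    }

    // Shape of the new helper from globalDefinitions.hpp.
    static size_t align_size_down_bounded(size_t size, size_t alignment) {
      size_t aligned_size = align_size_down(size, alignment);
      return aligned_size > 0 ? aligned_size : alignment;
    }

    int main() {
      const size_t min_alignment = 64 * 1024;
      // Exercise sizes below, at, and above the alignment.
      for (size_t s = 0; s < 4 * min_alignment; s += 4096) {
        // Old pattern: align down if above the minimum, else clamp to it.
        size_t old_result = s > min_alignment ? align_size_down(s, min_alignment)
                                              : min_alignment;
        assert(old_result == align_size_down_bounded(s, min_alignment));
      }
      return 0;
    }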
--- old/src/share/vm/memory/collectorPolicy.hpp	2013-10-25 15:28:58.000000000 +0200
+++ new/src/share/vm/memory/collectorPolicy.hpp	2013-10-25 15:28:58.000000000 +0200
@@ -79,6 +79,7 @@
   // Set to true when policy wants soft refs cleared.
   // Reset to false by gc after it clears all soft refs.
   bool _should_clear_all_soft_refs;
+
   // Set to true by the GC if the just-completed gc cleared all
   // softrefs.  This is set to true whenever a gc clears all softrefs, and
   // set to false each time gc returns to the mutator.  For example, in the
@@ -101,8 +102,8 @@
   // Return maximum heap alignment that may be imposed by the policy
   static size_t compute_max_alignment();
 
-  size_t min_alignment() { return _min_alignment; }
-  size_t max_alignment() { return _max_alignment; }
+  size_t min_alignment()          { return _min_alignment; }
+  size_t max_alignment()          { return _max_alignment; }
 
   size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
   size_t max_heap_byte_size()     { return _max_heap_byte_size; }
@@ -151,7 +152,6 @@
 
   virtual BarrierSet::Name barrier_set_name() = 0;
 
-  virtual GenRemSet::Name rem_set_name() = 0;
 
   // Create the remembered set (to cover the given reserved region,
   // allowing breaking up into at most "max_covered_regions").
@@ -249,7 +249,7 @@
 
   virtual int number_of_generations() = 0;
 
-  virtual GenerationSpec **generations() {
+  virtual GenerationSpec** generations() {
     assert(_generations != NULL, "Sanity check");
     return _generations;
   }
@@ -274,6 +274,12 @@
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size);
+
+  // The alignment used for eden and survivors within the young gen
+  // and for boundary between young gen and old gen.
+  static size_t intra_heap_alignment() {
+    return 64 * K * HeapWordSize;
+  }
 };
 
 // All of hotspot's current collectors are subtypes of this
@@ -301,9 +307,8 @@
   // Inherited methods
   TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }
 
-  int number_of_generations() { return 2; }
-  BarrierSet::Name barrier_set_name() { return BarrierSet::CardTableModRef; }
-  GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
+  int number_of_generations()         { return 2; }
+  BarrierSet::Name barrier_set_name() { return BarrierSet::CardTableModRef; }
 
   virtual CollectorPolicy::Name kind() {
     return CollectorPolicy::TwoGenerationCollectorPolicyKind;
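One consequence of deleting rem_set_name() here and in g1CollectorPolicy.hpp: a card table is now the only remembered-set kind any policy can request, so CollectorPolicy::create_rem_set() constructs it unconditionally (see the collectorPolicy.cpp hunk above). A toy illustration of why the virtual hook was removable (hypothetical, heavily simplified types):

    #include <cassert>

    struct GenRemSet { enum Name { CardTable }; };  // only one kind remains in use

    struct CardTableRS {
      explicit CardTableRS(int max_covered_regions)
          : _max_covered_regions(max_covered_regions) {}
      int _max_covered_regions;
    };

    // Before: assert(rem_set_name() == GenRemSet::CardTable, ...), then construct.
    // After: every caller wanted a card table, so just construct one.
    static CardTableRS* create_rem_set(int max_covered_regions) {
      return new CardTableRS(max_covered_regions);
    }

    int main() {
      CardTableRS* rs = create_rem_set(2);
      assert(rs->_max_covered_regions == 2);
      delete rs;
      return 0;
    }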
--- old/src/share/vm/memory/genCollectedHeap.cpp	2013-10-25 15:28:59.000000000 +0200
+++ new/src/share/vm/memory/genCollectedHeap.cpp	2013-10-25 15:28:59.000000000 +0200
@@ -1053,12 +1053,6 @@
   }
 }
 
-void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
-  for (int i = 0; i <= collectedGen; i++) {
-    _gens[i]->compute_new_size();
-  }
-}
-
 GenCollectedHeap* GenCollectedHeap::heap() {
   assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
   assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
--- old/src/share/vm/memory/genCollectedHeap.hpp	2013-10-25 15:28:59.000000000 +0200
+++ new/src/share/vm/memory/genCollectedHeap.hpp	2013-10-25 15:28:59.000000000 +0200
@@ -86,10 +86,6 @@
   NOT_PRODUCT(static size_t _skip_header_HeapWords;)
 
 protected:
-  // Directs each generation up to and including "collectedGen" to recompute
-  // its desired size.
-  void compute_new_generation_sizes(int collectedGen);
-
   // Helper functions for allocation
   HeapWord* attempt_allocation(size_t size,
                                bool is_tlab,
--- old/src/share/vm/memory/metaspace.cpp	2013-10-25 15:29:00.000000000 +0200
+++ new/src/share/vm/memory/metaspace.cpp	2013-10-25 15:29:00.000000000 +0200
@@ -2965,11 +2965,6 @@
 
 #endif
 
-// Align down. If the aligning result in 0, return 'alignment'.
-static size_t restricted_align_down(size_t size, size_t alignment) {
-  return MAX2(alignment, align_size_down_(size, alignment));
-}
-
 void Metaspace::ergo_initialize() {
   if (DumpSharedSpaces) {
     // Using large pages when dumping the shared archive is currently not implemented.
@@ -2992,13 +2987,13 @@
   // Ideally, we would be able to set the default value of MaxMetaspaceSize in
   // globals.hpp to the aligned value, but this is not possible, since the
   // alignment depends on other flags being parsed.
-  MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _reserve_alignment);
+  MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment);
 
   if (MetaspaceSize > MaxMetaspaceSize) {
     MetaspaceSize = MaxMetaspaceSize;
   }
 
-  MetaspaceSize = restricted_align_down(MetaspaceSize, _commit_alignment);
+  MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment);
 
   assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
 
@@ -3006,10 +3001,10 @@
     vm_exit_during_initialization("Too small initial Metaspace size");
   }
 
-  MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _commit_alignment);
-  MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment);
+  MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
+  MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
 
-  CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment);
+  CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
 
   set_class_metaspace_size(CompressedClassSpaceSize);
 }
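The metaspace hunks are a pure rename: the file-local restricted_align_down(), i.e. MAX2(alignment, align_size_down_(size, alignment)), moves to globalDefinitions.hpp as align_size_down_bounded() with identical behavior. Plugging illustrative numbers into the ergonomics above (the alignment values are assumptions, not the real flag defaults):

    #include <cassert>
    #include <cstddef>

    static const size_t K = 1024, M = 1024 * K;

    static size_t align_size_down_bounded(size_t size, size_t alignment) {
      size_t aligned_size = size & ~(alignment - 1);  // power-of-two align down
      return aligned_size > 0 ? aligned_size : alignment;
    }

    int main() {
      const size_t reserve_alignment = 16 * M;  // assumption
      const size_t commit_alignment  = 64 * K;  // assumption

      // MaxMetaspaceSize rounds down to a reservable granule: 100M -> 96M.
      assert(align_size_down_bounded(100 * M, reserve_alignment) == 96 * M);
      // A tiny user-specified value is bumped to one granule, never 0.
      assert(align_size_down_bounded(4 * K, commit_alignment) == 64 * K);
      return 0;
    }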
--- old/src/share/vm/memory/universe.cpp	2013-10-25 15:29:00.000000000 +0200
+++ new/src/share/vm/memory/universe.cpp	2013-10-25 15:29:00.000000000 +0200
@@ -1021,7 +1021,7 @@
 
     Universe::_virtual_machine_error_instance =
       InstanceKlass::cast(k)->allocate_instance(CHECK_false);
-    Universe::_vm_exception               = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
+    Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
 
     if (!DumpSharedSpaces) {
       // These are the only Java fields that are currently set during shared space dumping.
--- old/src/share/vm/runtime/arguments.cpp	2013-10-25 15:29:01.000000000 +0200
+++ new/src/share/vm/runtime/arguments.cpp	2013-10-25 15:29:01.000000000 +0200
@@ -1408,7 +1408,7 @@
   // NULL page is located before the heap, we pad the NULL page to the conservative
   // maximum alignment that the GC may ever impose upon the heap.
   size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(),
-                                                        Arguments::conservative_max_heap_alignment());
+                                                        _conservative_max_heap_alignment);
 
   LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
   NOT_LP64(ShouldNotReachHere(); return 0);
@@ -2678,9 +2678,10 @@
         describe_range_error(errcode);
         return JNI_EINVAL;
       }
-      FLAG_SET_CMDLINE(uintx, InitialHeapSize, (uintx)long_initial_heap_size);
+      set_min_heap_size((uintx)long_initial_heap_size);
       // Currently the minimum size and the initial heap sizes are the same.
-      set_min_heap_size(InitialHeapSize);
+      // Can be overridden with -XX:InitialHeapSize.
+      FLAG_SET_CMDLINE(uintx, InitialHeapSize, (uintx)long_initial_heap_size);
     // -Xmx
     } else if (match_option(option, "-Xmx", &tail) || match_option(option, "-XX:MaxHeapSize=", &tail)) {
       julong long_max_heap_size = 0;
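Behavioral note on the -Xms hunk: recording the minimum heap size before setting InitialHeapSize means a later -XX:InitialHeapSize on the same command line raises only the initial size, as the new comment says. A toy simulation of that ordering (illustrative only; real flag processing is more involved):

    #include <cassert>
    #include <cstddef>

    static const size_t M = 1024 * 1024;

    static size_t InitialHeapSize = 0;  // stand-in for the VM flag
    static size_t MinHeapSize     = 0;  // stand-in for Arguments::_min_heap_size

    static void handle_Xms(size_t bytes) {
      MinHeapSize = bytes;      // set_min_heap_size(...), done first after the patch
      InitialHeapSize = bytes;  // FLAG_SET_CMDLINE(uintx, InitialHeapSize, ...)
    }

    static void handle_InitialHeapSize(size_t bytes) {
      InitialHeapSize = bytes;  // overrides the initial size, not the minimum
    }

    int main() {
      // Simulates: java -Xms256m -XX:InitialHeapSize=1g (left-to-right processing)
      handle_Xms(256 * M);
      handle_InitialHeapSize(1024 * M);
      assert(MinHeapSize == 256 * M);
      assert(InitialHeapSize == 1024 * M);
      return 0;
    }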
--- old/src/share/vm/utilities/globalDefinitions.hpp	2013-10-25 15:29:02.000000000 +0200
+++ new/src/share/vm/utilities/globalDefinitions.hpp	2013-10-25 15:29:02.000000000 +0200
@@ -458,6 +458,13 @@
   return (void*) align_size_up_((uintptr_t)addr, size);
 }
 
+// Align down with a lower bound. If the aligning results in 0, return 'alignment'.
+
+inline size_t align_size_down_bounded(size_t size, size_t alignment) {
+  size_t aligned_size = align_size_down_(size, alignment);
+  return aligned_size > 0 ? aligned_size : alignment;
+}
+
 // Clamp an address to be within a specific page
 // 1. If addr is on the page it is returned as is
 // 2. If addr is above the page_address the start of the *next* page will be returned
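Edge cases of the new helper, for reference: align_size_down_ is a macro over power-of-two alignments, so the bounded variant never returns 0 and is the identity on exact multiples. A quick standalone check (align_size_down_ re-implemented locally; values illustrative):

    #include <cassert>
    #include <cstddef>

    // Local re-implementation of the align-down macro (power-of-two alignment).
    #define align_size_down_(size, alignment) ((size) & ~((alignment) - 1))

    inline size_t align_size_down_bounded(size_t size, size_t alignment) {
      size_t aligned_size = align_size_down_(size, alignment);
      return aligned_size > 0 ? aligned_size : alignment;
    }

    int main() {
      const size_t a = 4096;                               // power-of-two alignment
      assert(align_size_down_bounded(0, a) == a);          // never returns 0
      assert(align_size_down_bounded(a - 1, a) == a);      // sub-alignment sizes become a
      assert(align_size_down_bounded(a, a) == a);          // exact multiple: identity
      assert(align_size_down_bounded(10000, a) == 2 * a);  // 10000 -> 8192
      return 0;
    }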