--- old/agent/src/share/classes/sun/jvm/hotspot/memory/GenCollectedHeap.java 2014-10-17 16:28:29.000000000 +0200 +++ new/agent/src/share/classes/sun/jvm/hotspot/memory/GenCollectedHeap.java 2014-10-17 16:28:29.000000000 +0200 @@ -34,9 +34,11 @@ import sun.jvm.hotspot.utilities.*; public class GenCollectedHeap extends SharedHeap { - private static CIntegerField nGensField; - private static long gensOffset; - private static AddressField genSpecsField; + private static AddressField youngGenField; + private static AddressField oldGenField; + + private static AddressField youngGenSpecField; + private static AddressField oldGenSpecField; private static GenerationFactory genFactory; @@ -51,11 +53,14 @@ private static synchronized void initialize(TypeDataBase db) { Type type = db.lookupType("GenCollectedHeap"); - nGensField = type.getCIntegerField("_n_gens"); - gensOffset = type.getField("_gens").getOffset(); - genSpecsField = type.getAddressField("_gen_specs"); + youngGenField = type.getAddressField("_young_gen"); + oldGenField = type.getAddressField("_old_gen"); genFactory = new GenerationFactory(); + + Type colPolType = db.lookupType("GenCollectorPolicy"); + youngGenSpecField = colPolType.getAddressField("_young_gen_spec"); + oldGenSpecField = colPolType.getAddressField("_old_gen_spec"); } public GenCollectedHeap(Address addr) { @@ -63,7 +68,7 @@ } public int nGens() { - return (int) nGensField.getValue(addr); + return 2; } public Generation getGen(int i) { @@ -72,14 +77,15 @@ " out of range (should be between 0 and " + nGens() + ")"); } - if ((i < 0) || (i >= nGens())) { + switch (i) { + case 0: + return genFactory.newObject(youngGenField.getAddress()); + case 1: + return genFactory.newObject(oldGenField.getAddress()); + default: + // no generation for i, and assertions disabled. 
return null; } - - Address genAddr = addr.getAddressAt(gensOffset + - (i * VM.getVM().getAddressSize())); - return genFactory.newObject(addr.getAddressAt(gensOffset + - (i * VM.getVM().getAddressSize()))); } public boolean isIn(Address a) { @@ -120,13 +126,15 @@ return null; } - Address ptrList = genSpecsField.getValue(addr); - if (ptrList == null) { - return null; + if (level == 0) { + return (GenerationSpec) + VMObjectFactory.newObject(GenerationSpec.class, + youngGenSpecField.getAddress()); + } else { + return (GenerationSpec) + VMObjectFactory.newObject(GenerationSpec.class, + oldGenSpecField.getAddress()); } - return (GenerationSpec) - VMObjectFactory.newObject(GenerationSpec.class, - ptrList.getAddressAt(level * VM.getVM().getAddressSize())); } public CollectedHeapName kind() { --- old/agent/src/share/classes/sun/jvm/hotspot/memory/Generation.java 2014-10-17 16:28:30.000000000 +0200 +++ new/agent/src/share/classes/sun/jvm/hotspot/memory/Generation.java 2014-10-17 16:28:30.000000000 +0200 @@ -51,7 +51,7 @@ public abstract class Generation extends VMObject { private static long reservedFieldOffset; private static long virtualSpaceFieldOffset; - private static CIntegerField levelField; + private static int levelField; protected static final int K = 1024; // Fields for class StatRecord private static Field statRecordField; @@ -77,7 +77,7 @@ reservedFieldOffset = type.getField("_reserved").getOffset(); virtualSpaceFieldOffset = type.getField("_virtual_space").getOffset(); - levelField = type.getCIntegerField("_level"); + levelField = 0; // StatRecord statRecordField = type.getField("_stat_record"); type = db.lookupType("Generation::StatRecord"); @@ -137,7 +137,7 @@ } public int level() { - return (int) levelField.getValue(addr); + return levelField; } public int invocations() { --- old/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp 2014-10-17 16:28:31.000000000 +0200 +++ new/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp 2014-10-17 16:28:31.000000000 +0200 @@ -52,19 +52,14 @@ } void ConcurrentMarkSweepPolicy::initialize_generations() { - _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, - CURRENT_PC, AllocFailStrategy::RETURN_NULL); - if (_generations == NULL) - vm_exit_during_initialization("Unable to allocate gen spec"); - Generation::Name yg_name = UseParNewGC ? 
Generation::ParNew : Generation::DefNew; - _generations[0] = new GenerationSpec(yg_name, _initial_young_size, - _max_young_size); - _generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep, - _initial_old_size, _max_old_size); + _young_gen_spec = new GenerationSpec(yg_name, _initial_young_size, + _max_young_size, _gen_alignment); + _old_gen_spec = new GenerationSpec(Generation::ConcurrentMarkSweep, + _initial_old_size, _max_old_size, _gen_alignment); - if (_generations[0] == NULL || _generations[1] == NULL) { + if (_young_gen_spec == NULL || _old_gen_spec == NULL) { vm_exit_during_initialization("Unable to allocate gen spec"); } } --- old/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp 2014-10-17 16:28:32.000000000 +0200 +++ new/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp 2014-10-17 16:28:31.000000000 +0200 @@ -187,7 +187,7 @@ cp->space->set_compaction_top(compact_top); cp->space = cp->space->next_compaction_space(); if (cp->space == NULL) { - cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen); + cp->gen = GenCollectedHeap::heap()->young_gen(); assert(cp->gen != NULL, "compaction must succeed"); cp->space = cp->gen->first_compaction_space(); assert(cp->space != NULL, "generation must have a first compaction space"); @@ -907,7 +907,6 @@ } } - // Callers of this iterator beware: The closure application should // be robust in the face of uninitialized objects and should (always) // return a correct size so that the next addr + size below gives us a --- old/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp 2014-10-17 16:28:32.000000000 +0200 +++ new/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp 2014-10-17 16:28:32.000000000 +0200 @@ -197,10 +197,10 @@ }; ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration( - ReservedSpace rs, size_t initial_byte_size, int level, + ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct, bool use_adaptive_freelists, FreeBlockDictionary::DictionaryChoice dictionaryChoice) : - CardGeneration(rs, initial_byte_size, level, ct), + CardGeneration(rs, initial_byte_size, ct), _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))), _debug_collection_type(Concurrent_collection_type), _did_compact(false) @@ -380,7 +380,7 @@ double CMSStats::time_until_cms_gen_full() const { size_t cms_free = _cms_gen->cmsSpace()->free(); GenCollectedHeap* gch = GenCollectedHeap::heap(); - size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(), + size_t expected_promotion = MIN2(gch->young_gen()->capacity(), (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average()); if (cms_free > expected_promotion) { // Start a cms collection if there isn't enough space to promote @@ -708,7 +708,7 @@ // Support for parallelizing young gen rescan GenCollectedHeap* gch = GenCollectedHeap::heap(); - _young_gen = gch->prev_gen(_cmsGen); + _young_gen = gch->young_gen(); if (gch->supports_inline_contig_alloc()) { _top_addr = gch->top_addr(); _end_addr = gch->end_addr(); @@ -819,12 +819,17 @@ void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) { GenCollectedHeap* gch = GenCollectedHeap::heap(); if (PrintGCDetails) { + // The logging is deliberately left unchanged while removing the level concept, + // but it could eventually print "old" instead of the hard-coded "1". 
+ assert(this == gch->old_gen(), + "The CMS generation should be the old generation"); + int level = 1; if (Verbose) { gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]", - level(), short_name(), s, used(), capacity()); + level, short_name(), s, used(), capacity()); } else { gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]", - level(), short_name(), s, used() / K, capacity() / K); + level, short_name(), s, used() / K, capacity() / K); } } if (Verbose) { @@ -945,27 +950,24 @@ gclog_or_tty->print_cr("\nFrom compute_new_size: "); gclog_or_tty->print_cr(" Free fraction %f", free_percentage); gclog_or_tty->print_cr(" Desired free fraction %f", - desired_free_percentage); + desired_free_percentage); gclog_or_tty->print_cr(" Maximum free fraction %f", - maximum_free_percentage); - gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity()/1000); + maximum_free_percentage); + gclog_or_tty->print_cr(" Capacity "SIZE_FORMAT, capacity() / 1000); gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT, - desired_capacity/1000); - int prev_level = level() - 1; - if (prev_level >= 0) { - size_t prev_size = 0; - GenCollectedHeap* gch = GenCollectedHeap::heap(); - Generation* prev_gen = gch->_gens[prev_level]; - prev_size = prev_gen->capacity(); - gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT, - prev_size/1000); - } + desired_capacity / 1000); + GenCollectedHeap* gch = GenCollectedHeap::heap(); + assert(this == gch->_old_gen, + "The CMS generation should always be the old generation"); + size_t young_size = gch->_young_gen->capacity(); + gclog_or_tty->print_cr(" Young gen size "SIZE_FORMAT, + young_size / 1000); gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT, - unsafe_max_alloc_nogc()/1000); + unsafe_max_alloc_nogc() / 1000); gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT, - contiguous_available()/1000); + contiguous_available() / 1000); gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)", - expand_bytes); + expand_bytes); } // safe if expansion fails expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio); @@ -1126,7 +1128,7 @@ void CMSCollector::icms_update_allocation_limits() { - Generation* young = GenCollectedHeap::heap()->get_gen(0); + Generation* young = GenCollectedHeap::heap()->young_gen(); EdenSpace* eden = young->as_DefNewGeneration()->eden(); const unsigned int duty_cycle = stats().icms_update_duty_cycle(); @@ -1267,11 +1269,8 @@ size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords expand(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion); - // Since there's currently no next generation, we don't try to promote + // Since this is the old generation, we don't try to promote // into a more senior generation. - assert(next_gen() == NULL, "assumption, based upon which no attempt " - "is made to pass on a possibly failing " - "promotion to next generation"); res = _cmsSpace->promote(obj, obj_size); } if (res != NULL) { @@ -2057,8 +2056,7 @@ _intra_sweep_estimate.padded_average()); } - GenMarkSweep::invoke_at_safepoint(_cmsGen->level(), - ref_processor(), clear_all_soft_refs); + GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs); #ifdef ASSERT CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); size_t free_size = cms_space->free(); @@ -3006,7 +3004,7 @@ MarkRefsIntoClosure notOlder(_span, verification_mark_bm()); gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 
- gch->gen_process_roots(_cmsGen->level(), + gch->gen_process_roots(Generation::Old, true, // younger gens are roots true, // activate StrongRootsScope SharedHeap::ScanningOption(roots_scanning_options()), @@ -3074,7 +3072,7 @@ gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. - gch->gen_process_roots(_cmsGen->level(), + gch->gen_process_roots(Generation::Old, true, // younger gens are roots true, // activate StrongRootsScope SharedHeap::ScanningOption(roots_scanning_options()), @@ -3688,7 +3686,7 @@ // The serial version. CLDToOopClosure cld_closure(&notOlder, true); gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. - gch->gen_process_roots(_cmsGen->level(), + gch->gen_process_roots(Generation::Old, true, // younger gens are roots true, // activate StrongRootsScope SharedHeap::ScanningOption(roots_scanning_options()), @@ -4964,15 +4962,12 @@ FlagSetting fl(gch->_is_gc_active, false); NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark", PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());) - int level = _cmsGen->level() - 1; - if (level >= 0) { - gch->do_collection(true, // full (i.e. force, see below) - false, // !clear_all_soft_refs - 0, // size - false, // is_tlab - level // max_level - ); - } + gch->do_collection(true, // full (i.e. force, see below) + false, // !clear_all_soft_refs + 0, // size + false, // is_tlab + Generation::Young // type + ); } FreelistLocker x(this); MutexLockerEx y(bitMapLock(), @@ -5159,7 +5154,7 @@ CLDToOopClosure cld_closure(&par_mri_cl, true); - gch->gen_process_roots(_collector->_cmsGen->level(), + gch->gen_process_roots(Generation::Old, false, // yg was scanned above false, // this is parallel code SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), @@ -5295,7 +5290,7 @@ // ---------- remaining roots -------------- _timer.reset(); _timer.start(); - gch->gen_process_roots(_collector->_cmsGen->level(), + gch->gen_process_roots(Generation::Old, false, // yg was scanned above false, // this is parallel code SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), @@ -5887,7 +5882,7 @@ gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. GenCollectedHeap::StrongRootsScope srs(gch); - gch->gen_process_roots(_cmsGen->level(), + gch->gen_process_roots(Generation::Old, true, // younger gens as roots false, // use the local StrongRootsScope SharedHeap::ScanningOption(roots_scanning_options()), @@ -6364,11 +6359,12 @@ return _cmsSpace->find_chunk_at_end(); } -void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level, +void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation, bool full) { - // The next lower level has been collected. Gather any statistics + // The young generation has been collected. Gather any statistics // that are of interest at this point. - if (!full && (current_level + 1) == level()) { + bool current_is_young = (current_generation == GenCollectedHeap::heap()->young_gen()); + if (!full && current_is_young) { // Gather statistics on the young generation collection. 
collector()->stats().record_gc0_end(used()); } --- old/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp 2014-10-17 16:28:33.000000000 +0200 +++ new/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp 2014-10-17 16:28:33.000000000 +0200 @@ -1122,7 +1122,7 @@ void shrink_free_list_by(size_t bytes); // Update statistics for GC - virtual void update_gc_stats(int level, bool full); + virtual void update_gc_stats(Generation* current_generation, bool full); // Maximum available space in the generation (including uncommitted) // space. @@ -1134,7 +1134,7 @@ public: ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, - int level, CardTableRS* ct, + CardTableRS* ct, bool use_adaptive_freelists, FreeBlockDictionary::DictionaryChoice); --- old/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp 2014-10-17 16:28:34.000000000 +0200 +++ new/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp 2014-10-17 16:28:34.000000000 +0200 @@ -197,7 +197,7 @@ "We can only be executing this arm of if at a safepoint"); GCCauseSetter gccs(gch, _gc_cause); gch->do_full_collection(gch->must_clear_all_soft_refs(), - 0 /* collect only youngest gen */); + Generation::Young /* collect only youngest gen */); } // Else no need for a foreground young gc assert((_gc_count_before < gch->total_collections()) || (GC_locker::is_active() /* gc may have been skipped */ --- old/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp 2014-10-17 16:28:35.000000000 +0200 +++ new/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp 2014-10-17 16:28:35.000000000 +0200 @@ -63,36 +63,47 @@ #pragma warning( disable:4355 ) // 'this' : used in base member initializer list #endif ParScanThreadState::ParScanThreadState(Space* to_space_, - ParNewGeneration* gen_, + ParNewGeneration* young_gen_, Generation* old_gen_, int thread_num_, ObjToScanQueueSet* work_queue_set_, Stack* overflow_stacks_, size_t desired_plab_sz_, - ParallelTaskTerminator& term_) : - _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_), - _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false), - _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL), - _ageTable(false), // false ==> not the global age table, no perf data. - _to_space_alloc_buffer(desired_plab_sz_), - _to_space_closure(gen_, this), _old_gen_closure(gen_, this), - _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this), - _older_gen_closure(gen_, this), - _evacuate_followers(this, &_to_space_closure, &_old_gen_closure, - &_to_space_root_closure, gen_, &_old_gen_root_closure, - work_queue_set_, &term_), - _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this), - _keep_alive_closure(&_scan_weak_ref_closure), - _strong_roots_time(0.0), _term_time(0.0) -{ + ParallelTaskTerminator& term_) + : _to_space(to_space_), + _old_gen(old_gen_), + _young_gen(young_gen_), + _thread_num(thread_num_), + _work_queue(work_queue_set_->queue(thread_num_)), + _to_space_full(false), + _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL), + _ageTable(false), // false ==> not the global age table, no perf data. 
+ _to_space_alloc_buffer(desired_plab_sz_), + _to_space_closure(young_gen_, this), + _old_gen_closure(young_gen_, this), + _to_space_root_closure(young_gen_, this), + _old_gen_root_closure(young_gen_, this), + _older_gen_closure(young_gen_, this), + _evacuate_followers(this, + &_to_space_closure, + &_old_gen_closure, + &_to_space_root_closure, + young_gen_, + &_old_gen_root_closure, + work_queue_set_, + &term_), + _is_alive_closure(young_gen_), + _scan_weak_ref_closure(young_gen_, this), + _keep_alive_closure(&_scan_weak_ref_closure), + _strong_roots_time(0.0), + _term_time(0.0) { #if TASKQUEUE_STATS _term_attempts = 0; _overflow_refills = 0; _overflow_refill_objs = 0; #endif // TASKQUEUE_STATS - _survivor_chunk_array = - (ChunkArray*) old_gen()->get_data_recorder(thread_num()); + _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num()); _hash_seed = 17; // Might want to take time-based random value. _start = os::elapsedTime(); _old_gen_closure.set_generation(old_gen_); @@ -155,7 +166,6 @@ } } - void ParScanThreadState::trim_queues(int max_size) { ObjToScanQueue* queue = work_queue(); do { @@ -223,15 +233,12 @@ } HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) { - - // Otherwise, if the object is small enough, try to reallocate the - // buffer. + // If the object is small enough, try to reallocate the buffer. HeapWord* obj = NULL; if (!_to_space_full) { ParGCAllocBuffer* const plab = to_space_alloc_buffer(); Space* const sp = to_space(); - if (word_sz * 100 < - ParallelGCBufferWastePct * plab->word_sz()) { + if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) { // Is small enough; abandon this buffer and start a new one. plab->retire(false, false); size_t buf_size = plab->word_sz(); @@ -273,9 +280,7 @@ return obj; } - -void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, - size_t word_sz) { +void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) { // Is the alloc in the current alloc buffer? 
if (to_space_alloc_buffer()->contains(obj)) { assert(to_space_alloc_buffer()->contains(obj + word_sz - 1), @@ -301,7 +306,7 @@ ParNewGeneration& gen, Generation& old_gen, ObjToScanQueueSet& queue_set, - Stack* overflow_stacks_, + Stack* overflow_stacks, size_t desired_plab_sz, ParallelTaskTerminator& term); @@ -326,21 +331,24 @@ private: ParallelTaskTerminator& _term; ParNewGeneration& _gen; - Generation& _next_gen; + Generation& _old_gen; public: bool is_valid(int id) const { return id < length(); } ParallelTaskTerminator* terminator() { return &_term; } }; - -ParScanThreadStateSet::ParScanThreadStateSet( - int num_threads, Space& to_space, ParNewGeneration& gen, - Generation& old_gen, ObjToScanQueueSet& queue_set, - Stack* overflow_stacks, - size_t desired_plab_sz, ParallelTaskTerminator& term) +ParScanThreadStateSet::ParScanThreadStateSet(int num_threads, + Space& to_space, + ParNewGeneration& gen, + Generation& old_gen, + ObjToScanQueueSet& queue_set, + Stack* overflow_stacks, + size_t desired_plab_sz, + ParallelTaskTerminator& term) : ResourceArray(sizeof(ParScanThreadState), num_threads), - _gen(gen), _next_gen(old_gen), _term(term) -{ + _gen(gen), + _old_gen(old_gen), + _term(term) { assert(num_threads > 0, "sanity check!"); assert(ParGCUseLocalOverflow == (overflow_stacks != NULL), "overflow_stack allocation mismatch"); @@ -352,8 +360,7 @@ } } -inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) -{ +inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) { assert(i >= 0 && i < length(), "sanity check!"); return ((ParScanThreadState*)_data)[i]; } @@ -367,8 +374,7 @@ } } -void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed) -{ +void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed) { _term.reset_for_reuse(active_threads); if (promotion_failed) { for (int i = 0; i < length(); ++i) { @@ -379,24 +385,20 @@ #if TASKQUEUE_STATS void -ParScanThreadState::reset_stats() -{ +ParScanThreadState::reset_stats() { taskqueue_stats().reset(); _term_attempts = 0; _overflow_refills = 0; _overflow_refill_objs = 0; } -void ParScanThreadStateSet::reset_stats() -{ +void ParScanThreadStateSet::reset_stats() { for (int i = 0; i < length(); ++i) { thread_state(i).reset_stats(); } } -void -ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) -{ +void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) { st->print_raw_cr("GC Termination Stats"); st->print_raw_cr(" elapsed --strong roots-- " "-------termination-------"); @@ -406,8 +408,7 @@ "--------- ------ --------"); } -void ParScanThreadStateSet::print_termination_stats(outputStream* const st) -{ +void ParScanThreadStateSet::print_termination_stats(outputStream* const st) { print_termination_stats_hdr(st); for (int i = 0; i < length(); ++i) { @@ -423,15 +424,13 @@ } // Print stats related to work queue activity. 
-void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) -{ +void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) { st->print_raw_cr("GC Task Stats"); st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); } -void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) -{ +void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) { print_taskqueue_stats_hdr(st); TaskQueueStats totals; @@ -453,8 +452,7 @@ } #endif // TASKQUEUE_STATS -void ParScanThreadStateSet::flush() -{ +void ParScanThreadStateSet::flush() { // Work in this loop should be kept as lightweight as // possible since this might otherwise become a bottleneck // to scaling. Should we add heavy-weight work into this @@ -475,8 +473,8 @@ _gen.age_table()->merge(local_table); // Inform old gen that we're done. - _next_gen.par_promote_alloc_done(i); - _next_gen.par_oop_since_save_marks_iterate_done(i); + _old_gen.par_promote_alloc_done(i); + _old_gen.par_oop_since_save_marks_iterate_done(i); } if (UseConcMarkSweepGC && ParallelGCThreads > 0) { @@ -490,10 +488,10 @@ } ParScanClosure::ParScanClosure(ParNewGeneration* g, - ParScanThreadState* par_scan_state) : - OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) -{ - assert(_g->level() == 0, "Optimized for youngest generation"); + ParScanThreadState* par_scan_state) + : OopsInKlassOrGenClosure(g), + _par_scan_state(par_scan_state), + _g(g) { _boundary = _g->reserved().end(); } @@ -511,8 +509,9 @@ ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g, ParScanThreadState* par_scan_state) - : ScanWeakRefClosure(g), _par_scan_state(par_scan_state) -{} + : ScanWeakRefClosure(g), + _par_scan_state(par_scan_state) { +} void ParScanWeakRefClosure::do_oop(oop* p) { ParScanWeakRefClosure::do_oop_work(p); } void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); } @@ -521,31 +520,28 @@ #pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */ #endif -ParEvacuateFollowersClosure::ParEvacuateFollowersClosure( - ParScanThreadState* par_scan_state_, - ParScanWithoutBarrierClosure* to_space_closure_, - ParScanWithBarrierClosure* old_gen_closure_, - ParRootScanWithoutBarrierClosure* to_space_root_closure_, - ParNewGeneration* par_gen_, - ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_, - ObjToScanQueueSet* task_queues_, - ParallelTaskTerminator* terminator_) : - - _par_scan_state(par_scan_state_), - _to_space_closure(to_space_closure_), - _old_gen_closure(old_gen_closure_), - _to_space_root_closure(to_space_root_closure_), - _old_gen_root_closure(old_gen_root_closure_), - _par_gen(par_gen_), - _task_queues(task_queues_), - _terminator(terminator_) -{} +ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(ParScanThreadState* par_scan_state, + ParScanWithoutBarrierClosure* to_space_closure, + ParScanWithBarrierClosure* old_gen_closure, + ParRootScanWithoutBarrierClosure* to_space_root_closure, + ParNewGeneration* par_gen, + ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure, + ObjToScanQueueSet* task_queues, + ParallelTaskTerminator* terminator) + : _par_scan_state(par_scan_state), + _to_space_closure(to_space_closure), + _old_gen_closure(old_gen_closure), + _to_space_root_closure(to_space_root_closure), + _old_gen_root_closure(old_gen_root_closure), + _par_gen(par_gen), + _task_queues(task_queues), + 
_terminator(terminator) { +} void ParEvacuateFollowersClosure::do_void() { ObjToScanQueue* work_q = par_scan_state()->work_queue(); while (true) { - // Scan to-space and old-gen objs until we run out of both. oop obj_to_scan; par_scan_state()->trim_queues(0); @@ -578,18 +574,20 @@ par_scan_state()->end_term_time(); } -ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen, - HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) : - AbstractGangTask("ParNewGeneration collection"), - _gen(gen), _next_gen(next_gen), +ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, + Generation* old_gen, + HeapWord* young_old_boundary, + ParScanThreadStateSet* state_set) + : AbstractGangTask("ParNewGeneration collection"), + _young_gen(young_gen), _old_gen(old_gen), _young_old_boundary(young_old_boundary), - _state_set(state_set) - {} + _state_set(state_set) { +} // Reset the terminator for the given number of // active threads. void ParNewGenTask::set_for_termination(int active_workers) { - _state_set->reset(active_workers, _gen->promotion_failed()); + _state_set->reset(active_workers, _young_gen->promotion_failed()); // Should the heap be passed in? There's only 1 for now so // grab it instead. GenCollectedHeap* gch = GenCollectedHeap::heap(); @@ -602,10 +600,8 @@ // and handle marks. ResourceMark rm; HandleMark hm; - // We would need multiple old-gen queues otherwise. - assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen."); - Generation* old_gen = gch->next_gen(_gen); + Generation* old_gen = gch->old_gen(); ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id); assert(_state_set->is_valid(worker_id), "Should not have been called"); @@ -619,7 +615,7 @@ false); par_scan_state.start_strong_roots(); - gch->gen_process_roots(_gen->level(), + gch->gen_process_roots(Generation::Young, true, // Process younger gens, if any, // as strong roots. 
false, // no scope; this is parallel code @@ -640,12 +636,11 @@ #pragma warning( disable:4355 ) // 'this' : used in base member initializer list #endif ParNewGeneration:: -ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level) - : DefNewGeneration(rs, initial_byte_size, level, "PCopy"), - _overflow_list(NULL), - _is_alive_closure(this), - _plab_stats(YoungPLABSize, PLABWeight) -{ +ParNewGeneration(ReservedSpace rs, size_t initial_byte_size) + : DefNewGeneration(rs, initial_byte_size, "PCopy"), + _overflow_list(NULL), + _is_alive_closure(this), + _plab_stats(YoungPLABSize, PLABWeight) { NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;) NOT_PRODUCT(_num_par_pushes = 0;) _task_queues = new ObjToScanQueueSet(ParallelGCThreads); @@ -657,12 +652,12 @@ _task_queues->register_queue(i1, q); } - for (uint i2 = 0; i2 < ParallelGCThreads; i2++) + for (uint i2 = 0; i2 < ParallelGCThreads; i2++) { _task_queues->queue(i2)->initialize(); + } _overflow_stacks = NULL; if (ParGCUseLocalOverflow) { - // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal // with ',' typedef Stack GCOopStack; @@ -688,8 +683,9 @@ #endif // ParNewGeneration:: -ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) : - DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {} +ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) + : DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) { +} template void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) { @@ -715,8 +711,9 @@ void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); } // ParNewGeneration:: -KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) : - DefNewGeneration::KeepAliveClosure(cl) {} +KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) + : DefNewGeneration::KeepAliveClosure(cl) { +} template void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) { @@ -768,7 +765,7 @@ typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; public: ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen, - Generation& next_gen, + Generation& old_gen, HeapWord* young_old_boundary, ParScanThreadStateSet& state_set); @@ -778,29 +775,27 @@ _state_set.terminator()->reset_for_reuse(active_workers); } private: - ParNewGeneration& _gen; + ParNewGeneration& _young_gen; ProcessTask& _task; - Generation& _next_gen; + Generation& _old_gen; HeapWord* _young_old_boundary; ParScanThreadStateSet& _state_set; }; -ParNewRefProcTaskProxy::ParNewRefProcTaskProxy( - ProcessTask& task, ParNewGeneration& gen, - Generation& next_gen, - HeapWord* young_old_boundary, - ParScanThreadStateSet& state_set) +ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task, + ParNewGeneration& young_gen, + Generation& old_gen, + HeapWord* young_old_boundary, + ParScanThreadStateSet& state_set) : AbstractGangTask("ParNewGeneration parallel reference processing"), - _gen(gen), + _young_gen(young_gen), _task(task), - _next_gen(next_gen), + _old_gen(old_gen), _young_old_boundary(young_old_boundary), - _state_set(state_set) -{ + _state_set(state_set) { } -void ParNewRefProcTaskProxy::work(uint worker_id) -{ +void ParNewRefProcTaskProxy::work(uint worker_id) { ResourceMark rm; HandleMark hm; ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id); @@ -817,33 +812,29 @@ public: ParNewRefEnqueueTaskProxy(EnqueueTask& task) : AbstractGangTask("ParNewGeneration parallel reference enqueue"), - _task(task) - { } + _task(task) { + } - virtual 
void work(uint worker_id) - { + virtual void work(uint worker_id) { _task.work(worker_id); } }; - -void ParNewRefProcTaskExecutor::execute(ProcessTask& task) -{ +void ParNewRefProcTaskExecutor::execute(ProcessTask& task) { GenCollectedHeap* gch = GenCollectedHeap::heap(); assert(gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap"); FlexibleWorkGang* workers = gch->workers(); assert(workers != NULL, "Need parallel worker threads."); _state_set.reset(workers->active_workers(), _generation.promotion_failed()); - ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(), + ParNewRefProcTaskProxy rp_task(task, _generation, *(gch->old_gen()), _generation.reserved().end(), _state_set); workers->run_task(&rp_task); _state_set.reset(0 /* bad value in debug if not reset */, _generation.promotion_failed()); } -void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) -{ +void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) { GenCollectedHeap* gch = GenCollectedHeap::heap(); FlexibleWorkGang* workers = gch->workers(); assert(workers != NULL, "Need parallel worker threads."); @@ -851,34 +842,35 @@ workers->run_task(&enq_task); } -void ParNewRefProcTaskExecutor::set_single_threaded_mode() -{ +void ParNewRefProcTaskExecutor::set_single_threaded_mode() { _state_set.flush(); GenCollectedHeap* gch = GenCollectedHeap::heap(); gch->set_par_threads(0); // 0 ==> non-parallel. gch->save_marks(); } -ScanClosureWithParBarrier:: -ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) : - ScanClosure(g, gc_barrier) {} +ScanClosureWithParBarrier::ScanClosureWithParBarrier(ParNewGeneration* g, + bool gc_barrier) + : ScanClosure(g, gc_barrier) { +} EvacuateFollowersClosureGeneral:: -EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level, +EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, OopsInGenClosure* cur, - OopsInGenClosure* older) : - _gch(gch), _level(level), - _scan_cur_or_nonheap(cur), _scan_older(older) -{} + OopsInGenClosure* older) + : _gch(gch), + _scan_cur_or_nonheap(cur), + _scan_older(older) { +} void EvacuateFollowersClosureGeneral::do_void() { do { // Beware: this call will lead to closure applications via virtual // calls. - _gch->oop_since_save_marks_iterate(_level, + _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap, _scan_older); - } while (!_gch->no_allocs_since_save_marks(_level)); + } while (!_gch->no_allocs_since_save_marks(true /* include_young */)); } @@ -886,7 +878,9 @@ bool ParNewGeneration::_avoid_promotion_undo = false; -void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) { +void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, + ParScanThreadStateSet& thread_state_set, + ParNewTracer& gc_tracer) { assert(_promo_failure_scan_stack.is_empty(), "post condition"); _promo_failure_scan_stack.clear(true); // Clear cached segments. @@ -899,7 +893,7 @@ from()->set_next_compaction_space(to()); gch->set_incremental_collection_failed(); // Inform the next generation that a promotion failure occurred. 
- _next_gen->promotion_failure_occurred(); + _old_gen->promotion_failure_occurred(); // Trace promotion failure in the parallel GC threads thread_state_set.trace_promotion_failed(gc_tracer); @@ -931,9 +925,7 @@ workers->active_workers(), Threads::number_of_non_daemon_threads()); workers->set_active_workers(active_workers); - assert(gch->n_gens() == 2, - "Par collection currently only works with single older gen."); - _next_gen = gch->next_gen(this); + _old_gen = gch->old_gen(); // Do we have to avoid promotion_undo? if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) { set_avoid_promotion_undo(true); @@ -979,10 +971,10 @@ // because only those workers go through the termination protocol. ParallelTaskTerminator _term(n_workers, task_queues()); ParScanThreadStateSet thread_state_set(workers->active_workers(), - *to(), *this, *_next_gen, *task_queues(), + *to(), *this, *_old_gen, *task_queues(), _overflow_stacks, desired_plab_sz(), _term); - ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set); + ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set); gch->set_par_threads(n_workers); gch->rem_set()->prepare_for_younger_refs_iterate(true); // It turns out that even when we're using 1 thread, doing the work in a @@ -1007,7 +999,7 @@ ScanClosure scan_without_gc_barrier(this, false); ScanClosureWithParBarrier scan_with_gc_barrier(this, true); set_promo_failure_scan_stack_closure(&scan_without_gc_barrier); - EvacuateFollowersClosureGeneral evacuate_followers(gch, _level, + EvacuateFollowersClosureGeneral evacuate_followers(gch, &scan_without_gc_barrier, &scan_with_gc_barrier); rp->setup_policy(clear_all_soft_refs); // Can the mt_degree be set later (at run_task() time would be best)? @@ -1193,11 +1185,10 @@ forward_ptr = old->forward_to_atomic(ClaimedForwardPtr); if (forward_ptr != NULL) { // someone else beat us to it. - return real_forwardee(old); + return real_forwardee(old); } - new_obj = _next_gen->par_promote(par_scan_state->thread_num(), - old, m, sz); + new_obj = _old_gen->par_promote(par_scan_state->thread_num(), old, m, sz); if (new_obj == NULL) { // promotion failed, forward to self @@ -1227,8 +1218,11 @@ // information. if (TraceScavenge) { gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", - is_in_reserved(new_obj) ? "copying" : "tenuring", - new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size()); + is_in_reserved(new_obj) ? "copying" : "tenuring", + new_obj->klass()->internal_name(), + (void *)old, + (void *)new_obj, + new_obj->size()); } #endif @@ -1314,8 +1308,8 @@ if (new_obj == NULL) { // Either to-space is full or we decided to promote // try allocating obj tenured - new_obj = _next_gen->par_promote(par_scan_state->thread_num(), - old, m, sz); + new_obj = _old_gen->par_promote(par_scan_state->thread_num(), + old, m, sz); if (new_obj == NULL) { // promotion failed, forward to self @@ -1348,8 +1342,11 @@ // information. if (TraceScavenge) { gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", - is_in_reserved(new_obj) ? "copying" : "tenuring", - new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size()); + is_in_reserved(new_obj) ? 
"copying" : "tenuring", + new_obj->klass()->internal_name(), + (void*)old, + (void*)new_obj, + new_obj->size()); } #endif @@ -1395,8 +1392,8 @@ par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz); } else { assert(!_avoid_promotion_undo, "Should not be here if avoiding."); - _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(), - (HeapWord*)new_obj, sz); + _old_gen->par_promote_alloc_undo(par_scan_state->thread_num(), + (HeapWord*)new_obj, sz); } return forward_ptr; @@ -1511,7 +1508,9 @@ assert(!UseCompressedOops, "Error"); assert(par_scan_state->overflow_stack() == NULL, "Error"); - if (_overflow_list == NULL) return false; + if (_overflow_list == NULL) { + return false; + } // Otherwise, there was something there; try claiming the list. oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list)); --- old/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp 2014-10-17 16:28:36.000000000 +0200 +++ new/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp 2014-10-17 16:28:35.000000000 +0200 @@ -232,14 +232,14 @@ class ParNewGenTask: public AbstractGangTask { private: - ParNewGeneration* _gen; - Generation* _next_gen; + ParNewGeneration* _young_gen; + Generation* _old_gen; HeapWord* _young_old_boundary; class ParScanThreadStateSet* _state_set; public: - ParNewGenTask(ParNewGeneration* gen, - Generation* next_gen, + ParNewGenTask(ParNewGeneration* young_gen, + Generation* old_gen, HeapWord* young_old_boundary, ParScanThreadStateSet* state_set); @@ -264,11 +264,10 @@ class EvacuateFollowersClosureGeneral: public VoidClosure { private: GenCollectedHeap* _gch; - int _level; OopsInGenClosure* _scan_cur_or_nonheap; OopsInGenClosure* _scan_older; public: - EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level, + EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, OopsInGenClosure* cur, OopsInGenClosure* older); virtual void do_void(); @@ -356,7 +355,7 @@ void set_survivor_overflow(bool v) { _survivor_overflow = v; } public: - ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level); + ParNewGeneration(ReservedSpace rs, size_t initial_byte_size); ~ParNewGeneration() { for (uint i = 0; i < ParallelGCThreads; i++) --- old/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp 2014-10-17 16:28:36.000000000 +0200 +++ new/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp 2014-10-17 16:28:36.000000000 +0200 @@ -70,7 +70,7 @@ bool root_scan) { assert((!Universe::heap()->is_in_reserved(p) || generation()->is_in_reserved(p)) - && (generation()->level() == 0 || gc_barrier), + && (generation() == GenCollectedHeap::heap()->young_gen() || gc_barrier), "The gen must be right, and we must be doing the barrier " "in older generations."); T heap_oop = oopDesc::load_heap_oop(p); --- old/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp 2014-10-17 16:28:37.000000000 +0200 +++ new/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp 2014-10-17 16:28:37.000000000 +0200 @@ -49,7 +49,6 @@ inline bool ParallelScavengeHeap::is_in_young(oop p) { // Assumes the the old gen address range is lower than that of the young gen. 
- const void* loc = (void*) p; bool result = ((HeapWord*)p) >= young_gen()->reserved().start(); assert(result == young_gen()->is_in_reserved(p), err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, p2i((void*)p))); --- old/src/share/vm/gc_implementation/shared/vmGCOperations.cpp 2014-10-17 16:28:38.000000000 +0200 +++ new/src/share/vm/gc_implementation/shared/vmGCOperations.cpp 2014-10-17 16:28:37.000000000 +0200 @@ -192,7 +192,7 @@ GenCollectedHeap* gch = GenCollectedHeap::heap(); GCCauseSetter gccs(gch, _gc_cause); - gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level); + gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation); } // Returns true iff concurrent GCs unload metadata. --- old/src/share/vm/gc_implementation/shared/vmGCOperations.hpp 2014-10-17 16:28:38.000000000 +0200 +++ new/src/share/vm/gc_implementation/shared/vmGCOperations.hpp 2014-10-17 16:28:38.000000000 +0200 @@ -186,14 +186,14 @@ // GenCollectedHeap heap. class VM_GenCollectFull: public VM_GC_Operation { private: - int _max_level; + Generation::Type _max_generation; public: VM_GenCollectFull(unsigned int gc_count_before, unsigned int full_gc_count_before, GCCause::Cause gc_cause, - int max_level) + Generation::Type max_generation) : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */), - _max_level(max_level) { } + _max_generation(max_generation) { } ~VM_GenCollectFull() {} virtual VMOp_Type type() const { return VMOp_GenCollectFull; } virtual void doit(); @@ -207,10 +207,11 @@ ClassLoaderData* _loader_data; public: VM_CollectForMetadataAllocation(ClassLoaderData* loader_data, - size_t size, Metaspace::MetadataType mdtype, - unsigned int gc_count_before, - unsigned int full_gc_count_before, - GCCause::Cause gc_cause) + size_t size, + Metaspace::MetadataType mdtype, + unsigned int gc_count_before, + unsigned int full_gc_count_before, + GCCause::Cause gc_cause) : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true), _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) { } --- old/src/share/vm/memory/cardTableRS.cpp 2014-10-17 16:28:39.000000000 +0200 +++ new/src/share/vm/memory/cardTableRS.cpp 2014-10-17 16:28:39.000000000 +0200 @@ -56,12 +56,15 @@ #endif _ct_bs->initialize(); set_bs(_ct_bs); - _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1, + // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations() + // (which is always 2), but GenCollectedHeap has not been initialized yet. 
+ int max_gens = 2; + _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1, mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL); if (_last_cur_val_in_gen == NULL) { vm_exit_during_initialization("Could not create last_cur_val_in_gen array."); } - for (int i = 0; i < GenCollectedHeap::max_gens + 1; i++) { + for (int i = 0; i < max_gens + 1; i++) { _last_cur_val_in_gen[i] = clean_card_val(); } _ct_bs->set_CTRS(this); @@ -115,7 +118,7 @@ void CardTableRS::younger_refs_iterate(Generation* g, OopsInGenClosure* blk) { - _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val(); + _last_cur_val_in_gen[2 /* Number of generations */] = cur_youngergen_card_val(); g->younger_refs_iterate(blk); } @@ -313,7 +316,8 @@ } void CardTableRS::clear_into_younger(Generation* old_gen) { - assert(old_gen->level() == 1, "Should only be called for the old generation"); + assert(old_gen == GenCollectedHeap::heap()->old_gen(), + "Should only be called for the old generation"); // The card tables for the youngest gen need never be cleared. // There's a bit of subtlety in the clear() and invalidate() // methods that we exploit here and in invalidate_or_clear() @@ -324,7 +328,8 @@ } void CardTableRS::invalidate_or_clear(Generation* old_gen) { - assert(old_gen->level() == 1, "Should only be called for the old generation"); + assert(old_gen == GenCollectedHeap::heap()->old_gen(), + "Should only be called for the old generation"); // Invalidate the cards for the currently occupied part of // the old generation and clear the cards for the // unoccupied part of the generation (if any, making use @@ -390,7 +395,9 @@ VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {} void do_generation(Generation* gen) { // Skip the youngest generation. - if (gen->level() == 0) return; + if (gen == GenCollectedHeap::heap()->young_gen()) { + return; + } // Normally, we're interested in pointers to younger generations. VerifyCTSpaceClosure blk(_ct, gen->reserved().start()); gen->space_iterate(&blk, true); @@ -399,7 +406,9 @@ void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) { // We don't need to do young-gen spaces. - if (s->end() <= gen_boundary) return; + if (s->end() <= gen_boundary) { + return; + } MemRegion used = s->used_region(); jbyte* cur_entry = byte_for(used.start()); --- old/src/share/vm/memory/collectorPolicy.cpp 2014-10-17 16:28:40.000000000 +0200 +++ new/src/share/vm/memory/collectorPolicy.cpp 2014-10-17 16:28:40.000000000 +0200 @@ -191,11 +191,12 @@ _min_young_size(0), _initial_young_size(0), _max_young_size(0), - _gen_alignment(0), _min_old_size(0), _initial_old_size(0), _max_old_size(0), - _generations(NULL) + _gen_alignment(0), + _young_gen_spec(NULL), + _old_gen_spec(NULL) {} size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) { @@ -602,7 +603,7 @@ HandleMark hm; // Discard any handles allocated in each iteration. // First allocation attempt is lock-free. - Generation *young = gch->get_gen(0); + Generation *young = gch->young_gen(); assert(young->supports_inline_contig_alloc(), "Otherwise, must do alloc within heap lock"); if (young->should_allocate(size, is_tlab)) { @@ -616,8 +617,8 @@ { MutexLocker ml(Heap_lock); if (PrintGC && Verbose) { - gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:" - " attempting locked slow path allocation"); + gclog_or_tty->print_cr("GenCollectorPolicy::mem_allocate_work:" + " attempting locked slow path allocation"); } // Note that only large objects get a shot at being // allocated in later generations. 
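Note: the hunks in this patch call GenCollectedHeap::young_gen()/old_gen() and pass Generation::Young/Generation::Old where an int level used to be, but the corresponding declarations in genCollectedHeap.hpp and generation.hpp are not part of this excerpt. A minimal compilable sketch of the assumed shape (a toy model only; the accessor names come from the call sites above, and the enum name Type is confirmed only by the vmGCOperations.hpp hunk):

    // Toy model (assumption): a type tag and explicit young/old accessors
    // replace the numeric generation level and the _gens array.
    class Generation {
     public:
      enum Type { Young, Old };          // e.g. passed to gen_process_roots()
    };

    class GenCollectedHeap {
      Generation* _young_gen;            // previously _gens[0]
      Generation* _old_gen;              // previously _gens[1]
     public:
      Generation* young_gen() const { return _young_gen; }
      Generation* old_gen()   const { return _old_gen; }
    };

With exactly two generations, every loop over _gens (allocation fallback, spec lookup, statistics) unrolls into an explicit young/old pair, which is what the surrounding hunks do.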
@@ -706,7 +707,7 @@ // Give a warning if we seem to be looping forever. if ((QueuedAllocationWarningCount > 0) && (try_count % QueuedAllocationWarningCount == 0)) { - warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t" + warning("GenCollectorPolicy::mem_allocate_work retries %d times \n\t" " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : ""); } } @@ -716,10 +717,14 @@ bool is_tlab) { GenCollectedHeap *gch = GenCollectedHeap::heap(); HeapWord* result = NULL; - for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) { - Generation *gen = gch->get_gen(i); - if (gen->should_allocate(size, is_tlab)) { - result = gen->expand_and_allocate(size, is_tlab); + Generation *old = gch->old_gen(); + if (old->should_allocate(size, is_tlab)) { + result = old->expand_and_allocate(size, is_tlab); + } + if (result == NULL) { + Generation *young = gch->young_gen(); + if (young->should_allocate(size, is_tlab)) { + result = young->expand_and_allocate(size, is_tlab); } } assert(result == NULL || gch->is_in_reserved(result), "result not in heap"); @@ -746,7 +751,7 @@ false /* clear_all_soft_refs */, size /* size */, is_tlab /* is_tlab */, - number_of_generations() - 1 /* max_level */); + Generation::Old /* max_gen */); } else { if (Verbose && PrintGCDetails) { gclog_or_tty->print(" :: Trying full because partial may fail :: "); @@ -759,7 +764,7 @@ false /* clear_all_soft_refs */, size /* size */, is_tlab /* is_tlab */, - number_of_generations() - 1 /* max_level */); + Generation::Old /* max_gen */); } result = gch->attempt_allocation(size, is_tlab, false /*first_only*/); @@ -787,7 +792,7 @@ true /* clear_all_soft_refs */, size /* size */, is_tlab /* is_tlab */, - number_of_generations() - 1 /* max_level */); + Generation::Old /* max_gen */); } result = gch->attempt_allocation(size, is_tlab, false /* first_only */); @@ -892,7 +897,7 @@ bool GenCollectorPolicy::should_try_older_generation_allocation( size_t word_size) const { GenCollectedHeap* gch = GenCollectedHeap::heap(); - size_t young_capacity = gch->get_gen(0)->capacity_before_gc(); + size_t young_capacity = gch->young_gen()->capacity_before_gc(); return (word_size > heap_word_size(young_capacity)) || GC_locker::is_active_and_needs_gc() || gch->incremental_collection_failed(); @@ -909,20 +914,14 @@ } void MarkSweepPolicy::initialize_generations() { - _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, CURRENT_PC, - AllocFailStrategy::RETURN_NULL); - if (_generations == NULL) { - vm_exit_during_initialization("Unable to allocate gen spec"); - } - if (UseParNewGC) { - _generations[0] = new GenerationSpec(Generation::ParNew, _initial_young_size, _max_young_size); + _young_gen_spec = new GenerationSpec(Generation::ParNew, _initial_young_size, _max_young_size, _gen_alignment); } else { - _generations[0] = new GenerationSpec(Generation::DefNew, _initial_young_size, _max_young_size); + _young_gen_spec = new GenerationSpec(Generation::DefNew, _initial_young_size, _max_young_size, _gen_alignment); } - _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_old_size, _max_old_size); + _old_gen_spec = new GenerationSpec(Generation::MarkSweepCompact, _initial_old_size, _max_old_size, _gen_alignment); - if (_generations[0] == NULL || _generations[1] == NULL) { + if (_young_gen_spec == NULL || _old_gen_spec == NULL) { vm_exit_during_initialization("Unable to allocate gen spec"); } } --- old/src/share/vm/memory/collectorPolicy.hpp 2014-10-17 16:28:40.000000000 
+0200 +++ new/src/share/vm/memory/collectorPolicy.hpp 2014-10-17 16:28:40.000000000 +0200 @@ -216,7 +216,8 @@ }; class GenCollectorPolicy : public CollectorPolicy { -friend class TestGenCollectorPolicy; + friend class TestGenCollectorPolicy; + friend class VMStructs; protected: size_t _min_young_size; size_t _initial_young_size; @@ -229,7 +230,8 @@ // time. When using large pages they can differ. size_t _gen_alignment; - GenerationSpec **_generations; + GenerationSpec* _young_gen_spec; + GenerationSpec* _old_gen_spec; // Return true if an allocation should be attempted in the older generation // if it fails in the younger generation. Return false, otherwise. @@ -269,9 +271,14 @@ int number_of_generations() { return 2; } - virtual GenerationSpec **generations() { - assert(_generations != NULL, "Sanity check"); - return _generations; + virtual GenerationSpec* young_gen_spec() const { + assert(_young_gen_spec != NULL, "Sanity check"); + return _young_gen_spec; + } + + virtual GenerationSpec* old_gen_spec() const { + assert(_old_gen_spec != NULL, "Sanity check"); + return _old_gen_spec; } virtual GenCollectorPolicy* as_generation_policy() { return this; } --- old/src/share/vm/memory/defNewGeneration.cpp 2014-10-17 16:28:41.000000000 +0200 +++ new/src/share/vm/memory/defNewGeneration.cpp 2014-10-17 16:28:41.000000000 +0200 @@ -56,9 +56,7 @@ // Methods of protected closure types. -DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) { - assert(g->level() == 0, "Optimized for youngest gen."); -} +DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) { } bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) { return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded(); } @@ -83,39 +81,36 @@ void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } DefNewGeneration::EvacuateFollowersClosure:: -EvacuateFollowersClosure(GenCollectedHeap* gch, int level, +EvacuateFollowersClosure(GenCollectedHeap* gch, ScanClosure* cur, ScanClosure* older) : - _gch(gch), _level(level), - _scan_cur_or_nonheap(cur), _scan_older(older) + _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older) {} void DefNewGeneration::EvacuateFollowersClosure::do_void() { do { - _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, + _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap, _scan_older); - } while (!_gch->no_allocs_since_save_marks(_level)); + } while (!_gch->no_allocs_since_save_marks(Generation::Young)); } DefNewGeneration::FastEvacuateFollowersClosure:: -FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level, +FastEvacuateFollowersClosure(GenCollectedHeap* gch, DefNewGeneration* gen, FastScanClosure* cur, FastScanClosure* older) : - _gch(gch), _level(level), _gen(gen), - _scan_cur_or_nonheap(cur), _scan_older(older) + _gch(gch), _gen(gen), _scan_cur_or_nonheap(cur), _scan_older(older) {} void DefNewGeneration::FastEvacuateFollowersClosure::do_void() { do { - _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, + _gch->oop_since_save_marks_iterate(Generation::Young, _scan_cur_or_nonheap, _scan_older); - } while (!_gch->no_allocs_since_save_marks(_level)); + } while (!_gch->no_allocs_since_save_marks(Generation::Young)); guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan"); } ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) : OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) { - assert(_g->level() == 0, "Optimized 
for youngest generation"); _boundary = _g->reserved().end(); } @@ -125,7 +120,6 @@ FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) : OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) { - assert(_g->level() == 0, "Optimized for youngest generation"); _boundary = _g->reserved().end(); } @@ -166,7 +160,6 @@ ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) : _g(g) { - assert(_g->level() == 0, "Optimized for youngest generation"); _boundary = _g->reserved().end(); } @@ -184,9 +177,8 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs, size_t initial_size, - int level, const char* policy) - : Generation(rs, initial_size, level), + : Generation(rs, initial_size), _promo_failure_drain_in_progress(false), _should_allocate_from_space(false) { @@ -230,7 +222,7 @@ compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle); update_counters(); - _next_gen = NULL; + _old_gen = NULL; _tenuring_threshold = MaxTenuringThreshold; _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize; @@ -382,13 +374,9 @@ return; } - int next_level = level() + 1; GenCollectedHeap* gch = GenCollectedHeap::heap(); - assert(next_level < gch->_n_gens, - "DefNewGeneration cannot be an oldest gen"); - Generation* next_gen = gch->_gens[next_level]; - size_t old_size = next_gen->capacity(); + size_t old_size = gch->old_gen()->capacity(); size_t new_size_before = _virtual_space.committed_size(); size_t min_new_size = spec()->init_size(); size_t max_new_size = reserved().byte_size(); @@ -572,7 +560,7 @@ DefNewTracer gc_tracer; gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start()); - _next_gen = gch->next_gen(this); + _old_gen = gch->old_gen(); // If the next generation is too full to accommodate promotion // from this generation, pass on collection; let the next generation @@ -605,7 +593,7 @@ gch->rem_set()->prepare_for_younger_refs_iterate(false); - assert(gch->no_allocs_since_save_marks(0), + assert(gch->no_allocs_since_save_marks(Generation::Young), "save marks have not been newly set."); // Not very pretty. @@ -621,14 +609,14 @@ false); set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier); - FastEvacuateFollowersClosure evacuate_followers(gch, _level, this, + FastEvacuateFollowersClosure evacuate_followers(gch, this, &fsc_with_no_gc_barrier, &fsc_with_gc_barrier); - assert(gch->no_allocs_since_save_marks(0), + assert(gch->no_allocs_since_save_marks(Generation::Young), "save marks have not been newly set."); - gch->gen_process_roots(_level, + gch->gen_process_roots(Generation::Young, true, // Process younger gens, if any, // as strong roots. true, // activate StrongRootsScope @@ -692,7 +680,7 @@ gch->set_incremental_collection_failed(); // Inform the next generation that a promotion failure occurred. - _next_gen->promotion_failure_occurred(); + _old_gen->promotion_failure_occurred(); gc_tracer.report_promotion_failed(_promotion_failed_info); // Reset the PromotionFailureALot counters. 
@@ -797,7 +785,7 @@ // Otherwise try allocating obj tenured if (obj == NULL) { - obj = _next_gen->promote(old, s); + obj = _old_gen->promote(old, s); if (obj == NULL) { handle_promotion_failure(old); return old; @@ -866,8 +854,10 @@ void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor, size_t max_alloc_words) { - if (requestor == this || _promotion_failed) return; - assert(requestor->level() > level(), "DefNewGeneration must be youngest"); + if (requestor == this || _promotion_failed) { + return; + } + assert(requestor == GenCollectedHeap::heap()->old_gen(), "We should not call our own generation"); /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate. if (to_space->top() > to_space->bottom()) { @@ -902,11 +892,11 @@ } return false; } - if (_next_gen == NULL) { + if (_old_gen == NULL) { GenCollectedHeap* gch = GenCollectedHeap::heap(); - _next_gen = gch->next_gen(this); + _old_gen = gch->old_gen(); } - return _next_gen->promotion_attempt_is_safe(used()); + return _old_gen->promotion_attempt_is_safe(used()); } void DefNewGeneration::gc_epilogue(bool full) { @@ -1026,8 +1016,7 @@ return eden(); } -HeapWord* DefNewGeneration::allocate(size_t word_size, - bool is_tlab) { +HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) { // This is the slow-path allocation for the DefNewGeneration. // Most allocations are fast-path in compiled code. // We try to allocate from the eden. If that works, we are happy. @@ -1035,17 +1024,17 @@ // have to use it here, as well. HeapWord* result = eden()->par_allocate(word_size); if (result != NULL) { - if (CMSEdenChunksRecordAlways && _next_gen != NULL) { - _next_gen->sample_eden_chunk(); + if (CMSEdenChunksRecordAlways && _old_gen != NULL) { + _old_gen->sample_eden_chunk(); } return result; } do { HeapWord* old_limit = eden()->soft_end(); if (old_limit < eden()->end()) { - // Tell the next generation we reached a limit. + // Tell the old generation we reached a limit. HeapWord* new_limit = - next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size); + _old_gen->allocation_limit_reached(eden(), eden()->top(), word_size); if (new_limit != NULL) { Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit); } else { @@ -1068,8 +1057,8 @@ // circular dependency at compile time. if (result == NULL) { result = allocate_from_space(word_size); - } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) { - _next_gen->sample_eden_chunk(); + } else if (CMSEdenChunksRecordAlways && _old_gen != NULL) { + _old_gen->sample_eden_chunk(); } return result; } @@ -1077,8 +1066,8 @@ HeapWord* DefNewGeneration::par_allocate(size_t word_size, bool is_tlab) { HeapWord* res = eden()->par_allocate(word_size); - if (CMSEdenChunksRecordAlways && _next_gen != NULL) { - _next_gen->sample_eden_chunk(); + if (CMSEdenChunksRecordAlways && _old_gen != NULL) { + _old_gen->sample_eden_chunk(); } return res; } --- old/src/share/vm/memory/defNewGeneration.hpp 2014-10-17 16:28:42.000000000 +0200 +++ new/src/share/vm/memory/defNewGeneration.hpp 2014-10-17 16:28:42.000000000 +0200 @@ -44,7 +44,7 @@ friend class VMStructs; protected: - Generation* _next_gen; + Generation* _old_gen; uint _tenuring_threshold; // Tenuring threshold for next collection. 
ageTable _age_table; // Size of object to pretenure in words; command line provides bytes @@ -182,23 +182,21 @@ class EvacuateFollowersClosure: public VoidClosure { GenCollectedHeap* _gch; - int _level; ScanClosure* _scan_cur_or_nonheap; ScanClosure* _scan_older; public: - EvacuateFollowersClosure(GenCollectedHeap* gch, int level, + EvacuateFollowersClosure(GenCollectedHeap* gch, ScanClosure* cur, ScanClosure* older); void do_void(); }; class FastEvacuateFollowersClosure: public VoidClosure { GenCollectedHeap* _gch; - int _level; DefNewGeneration* _gen; FastScanClosure* _scan_cur_or_nonheap; FastScanClosure* _scan_older; public: - FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level, + FastEvacuateFollowersClosure(GenCollectedHeap* gch, DefNewGeneration* gen, FastScanClosure* cur, FastScanClosure* older); @@ -206,7 +204,7 @@ }; public: - DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level, + DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, const char* policy="Copy"); virtual void ref_processor_init(); --- old/src/share/vm/memory/genCollectedHeap.cpp 2014-10-17 16:28:42.000000000 +0200 +++ new/src/share/vm/memory/genCollectedHeap.cpp 2014-10-17 16:28:42.000000000 +0200 @@ -85,9 +85,6 @@ jint GenCollectedHeap::initialize() { CollectedHeap::pre_initialize(); - int i; - _n_gens = gen_policy()->number_of_generations(); - // While there are no constraints in the GC code that HeapWordSize // be any particular value, there are multiple other areas in the // system which believe this to be true (e.g. oop->object_size in some @@ -95,16 +92,6 @@ // HeapWordSize). guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); - // The heap must be at least as aligned as generations. - size_t gen_alignment = Generation::GenGrain; - - _gen_specs = gen_policy()->generations(); - - // Make sure the sizes are all aligned. - for (i = 0; i < _n_gens; i++) { - _gen_specs[i]->align(gen_alignment); - } - // Allocate space for the heap. char* heap_address; @@ -130,11 +117,14 @@ _gch = this; - for (i = 0; i < _n_gens; i++) { - ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false); - _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set()); - heap_rs = heap_rs.last_part(_gen_specs[i]->max_size()); - } + ReservedSpace young_rs = heap_rs.first_part(gen_policy()->young_gen_spec()->max_size(), false, false); + _young_gen = gen_policy()->young_gen_spec()->init(young_rs, rem_set()); + heap_rs = heap_rs.last_part(gen_policy()->young_gen_spec()->max_size()); + + ReservedSpace old_rs = heap_rs.first_part(gen_policy()->old_gen_spec()->max_size(), false, false); + _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set()); + heap_rs = heap_rs.last_part(gen_policy()->old_gen_spec()->max_size()); + clear_incremental_collection_failed(); #if INCLUDE_ALL_GCS @@ -149,7 +139,6 @@ return JNI_OK; } - char* GenCollectedHeap::allocate(size_t alignment, size_t* _total_reserved, int* _n_covered_regions, @@ -158,24 +147,21 @@ "the maximum representable size"; // Now figure out the total size. - size_t total_reserved = 0; - int n_covered_regions = 0; - const size_t pageSize = UseLargePages ? - os::large_page_size() : os::vm_page_size(); - + const size_t pageSize = UseLargePages ? 
os::large_page_size() : os::vm_page_size(); assert(alignment % pageSize == 0, "Must be"); - for (int i = 0; i < _n_gens; i++) { - total_reserved += _gen_specs[i]->max_size(); - if (total_reserved < _gen_specs[i]->max_size()) { - vm_exit_during_initialization(overflow_msg); - } - n_covered_regions += _gen_specs[i]->n_covered_regions(); + size_t total_reserved = gen_policy()->young_gen_spec()->max_size() + + gen_policy()->old_gen_spec()->max_size(); + if (total_reserved < gen_policy()->young_gen_spec()->max_size() || + total_reserved < gen_policy()->old_gen_spec()->max_size()) { + vm_exit_during_initialization(overflow_msg); } assert(total_reserved % alignment == 0, err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment=" SIZE_FORMAT, total_reserved, alignment)); + int n_covered_regions = 2; // Young + Old + // Needed until the cardtable is fixed to have the right number // of covered regions. n_covered_regions += 2; @@ -187,64 +173,46 @@ return heap_rs->base(); } - void GenCollectedHeap::post_initialize() { SharedHeap::post_initialize(); GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy(); guarantee(policy->is_generation_policy(), "Illegal policy type"); - DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0); + DefNewGeneration* def_new_gen = (DefNewGeneration*) _young_gen; assert(def_new_gen->kind() == Generation::DefNew || def_new_gen->kind() == Generation::ParNew, "Wrong generation kind"); - Generation* old_gen = get_gen(1); - assert(old_gen->kind() == Generation::ConcurrentMarkSweep || - old_gen->kind() == Generation::MarkSweepCompact, + assert(_old_gen->kind() == Generation::ConcurrentMarkSweep || + _old_gen->kind() == Generation::MarkSweepCompact, "Wrong generation kind"); policy->initialize_size_policy(def_new_gen->eden()->capacity(), - old_gen->capacity(), + _old_gen->capacity(), def_new_gen->from()->capacity()); policy->initialize_gc_policy_counters(); } void GenCollectedHeap::ref_processing_init() { SharedHeap::ref_processing_init(); - for (int i = 0; i < _n_gens; i++) { - _gens[i]->ref_processor_init(); - } + _young_gen->ref_processor_init(); + _old_gen->ref_processor_init(); } size_t GenCollectedHeap::capacity() const { - size_t res = 0; - for (int i = 0; i < _n_gens; i++) { - res += _gens[i]->capacity(); - } - return res; + return _young_gen->capacity() + _old_gen->capacity(); } size_t GenCollectedHeap::used() const { - size_t res = 0; - for (int i = 0; i < _n_gens; i++) { - res += _gens[i]->used(); - } - return res; + return _young_gen->used() + _old_gen->used(); } -// Save the "used_region" for generations level and lower. 
-void GenCollectedHeap::save_used_regions(int level) { - assert(level < _n_gens, "Illegal level parameter"); - for (int i = level; i >= 0; i--) { - _gens[i]->save_used_region(); - } +void GenCollectedHeap::save_used_regions() { + _old_gen->save_used_region(); + _young_gen->save_used_region(); } size_t GenCollectedHeap::max_capacity() const { - size_t res = 0; - for (int i = 0; i < _n_gens; i++) { - res += _gens[i]->max_capacity(); - } - return res; + return _young_gen->max_capacity() + _old_gen->max_capacity(); } // Update the _full_collections_completed counter @@ -308,16 +276,20 @@ HeapWord* GenCollectedHeap::attempt_allocation(size_t size, bool is_tlab, bool first_only) { - HeapWord* res; - for (int i = 0; i < _n_gens; i++) { - if (_gens[i]->should_allocate(size, is_tlab)) { - res = _gens[i]->allocate(size, is_tlab); - if (res != NULL) return res; - else if (first_only) break; + HeapWord* res = NULL; + + if (_young_gen->should_allocate(size, is_tlab)) { + res = _young_gen->allocate(size, is_tlab); + if (res != NULL || first_only) { + return res; } } - // Otherwise... - return NULL; + + if (_old_gen->should_allocate(size, is_tlab)) { + res = _old_gen->allocate(size, is_tlab); + } + + return res; } HeapWord* GenCollectedHeap::mem_allocate(size_t size, @@ -337,12 +309,115 @@ (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); } -void GenCollectedHeap::do_collection(bool full, - bool clear_all_soft_refs, - size_t size, - bool is_tlab, - int max_level) { - bool prepared_for_verification = false; +void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size, + bool is_tlab, bool run_verification, bool clear_soft_refs) { + // Timer for individual generations. Last argument is false: no CR + // FIXME: We should try to start the timing earlier to cover more of the GC pause + // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later + // so we can assume here that the next GC id is what we want. + GCTraceTime t1(gen->short_name(), PrintGCDetails, false, NULL, GCId::peek()); + TraceCollectorStats tcs(gen->counters()); + TraceMemoryManagerStats tmms(gen->kind(), gc_cause()); + + size_t prev_used = gen->used(); + gen->stat_record()->invocations++; + gen->stat_record()->accumulated_time.start(); + + // Must be done anew before each collection because + // a previous collection will do mangling and will + // change top of some spaces. + record_gen_tops_before_GC(); + + if (PrintGC && Verbose) { + // I didn't want to change the logging when removing the level concept, + // but I guess this logging could say young/old or something instead of 0/1. + int level; + if (gen == GenCollectedHeap::heap()->young_gen()) { + level = 0; + } else { + level = 1; + } + gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT, + level, + gen->stat_record()->invocations, + size * HeapWordSize); + } + + if (run_verification && VerifyBeforeGC) { + HandleMark hm; // Discard invalid handles created during verification + Universe::verify(" VerifyBeforeGC:"); + } + COMPILER2_PRESENT(DerivedPointerTable::clear()); + + // Do collection work + { + // Note on ref discovery: For what appear to be historical reasons, + // GCH enables and disables (by enqueueing) refs discovery. + // In the future this should be moved into the generation's + // collect method so that ref discovery and enqueueing concerns + // are local to a generation.
The collect method could return + // an appropriate indication in the case that notification on + // the ref lock was needed. This will make the treatment of + // weak refs more uniform (and indeed remove such concerns + // from GCH). XXX + + HandleMark hm; // Discard invalid handles created during gc + save_marks(); // save marks for all gens + // We want to discover references, but not process them yet. + // This mode is disabled in process_discovered_references if the + // generation does some collection work, or in + // enqueue_discovered_references if the generation returns + // without doing any work. + ReferenceProcessor* rp = gen->ref_processor(); + // If the discovery of ("weak") refs in this generation is + // atomic wrt other collectors in this configuration, we + // are guaranteed to have empty discovered ref lists. + if (rp->discovery_is_atomic()) { + rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/); + rp->setup_policy(clear_soft_refs); + } else { + // collect() below will enable discovery as appropriate + } + gen->collect(full, clear_soft_refs, size, is_tlab); + if (!rp->enqueuing_is_done()) { + rp->enqueue_discovered_references(); + } else { + rp->set_enqueuing_is_done(false); + } + rp->verify_no_references_recorded(); + } + + // Determine if allocation request was met. + if (size > 0) { + if (!is_tlab || gen->supports_tlab_allocation()) { + if (size * HeapWordSize <= gen->unsafe_max_alloc_nogc()) { + size = 0; + } + } + } + + COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); + + gen->stat_record()->accumulated_time.stop(); + + update_gc_stats(gen, full); + + if (run_verification && VerifyAfterGC) { + HandleMark hm; // Discard invalid handles created during verification + Universe::verify(" VerifyAfterGC:"); + } + + if (PrintGCDetails) { + gclog_or_tty->print(":"); + gen->print_heap_change(prev_used); + } +} + +void GenCollectedHeap::do_collection(bool full, + bool clear_all_soft_refs, + size_t size, + bool is_tlab, + Generation::Type max_generation) { ResourceMark rm; DEBUG_ONLY(Thread* my_thread = Thread::current();) @@ -353,7 +428,6 @@ assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock"); guarantee(!is_gc_active(), "collection is not reentrant"); - assert(max_level < n_gens(), "sanity check"); if (GC_locker::check_active_before_gc()) { return; // GC is disabled (e.g. JNI GetXXXCritical operation) @@ -371,7 +445,7 @@ { FlagSetting fl(_is_gc_active, true); - bool complete = full && (max_level == (n_gens()-1)); + bool complete = full && (max_generation == Generation::Old); const char* gc_cause_prefix = complete ? "Full GC" : "GC"; gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); @@ -383,147 +457,46 @@ increment_total_collections(complete); size_t gch_prev_used = used(); + bool must_restore_marks_for_biased_locking = false; + bool old_collected = false; + bool run_verification = total_collections() >= VerifyGCStartAt; - int starting_level = 0; - if (full) { - // Search for the oldest generation which will collect all younger - // generations, and start collection loop there. - for (int i = max_level; i >= 0; i--) { - if (_gens[i]->full_collects_younger_generations()) { - starting_level = i; - break; - } + if (_young_gen->performs_in_place_marking() || + _old_gen->performs_in_place_marking()) { + // We want to avoid doing this for + // scavenge-only collections where it's unnecessary. 
+ must_restore_marks_for_biased_locking = true; + BiasedLocking::preserve_marks(); + } + + bool prepared_for_verification = false; + if (!(full && _old_gen->full_collects_younger_generations()) && + _young_gen->should_collect(full, size, is_tlab)) { + if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) { + prepare_for_verify(); + prepared_for_verification = true; } + collect_generation(_young_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 0, do_clear_all_soft_refs); } - - bool must_restore_marks_for_biased_locking = false; - - int max_level_collected = starting_level; - for (int i = starting_level; i <= max_level; i++) { - if (_gens[i]->should_collect(full, size, is_tlab)) { - if (i == n_gens() - 1) { // a major collection is to happen - if (!complete) { - // The full_collections increment was missed above. - increment_total_full_collections(); - } - pre_full_gc_dump(NULL); // do any pre full gc dumps - } - // Timer for individual generations. Last argument is false: no CR - // FIXME: We should try to start the timing earlier to cover more of the GC pause - // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later - // so we can assume here that the next GC id is what we want. - GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek()); - TraceCollectorStats tcs(_gens[i]->counters()); - TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause()); - - size_t prev_used = _gens[i]->used(); - _gens[i]->stat_record()->invocations++; - _gens[i]->stat_record()->accumulated_time.start(); - - // Must be done anew before each collection because - // a previous collection will do mangling and will - // change top of some spaces. - record_gen_tops_before_GC(); - - if (PrintGC && Verbose) { - gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT, - i, - _gens[i]->stat_record()->invocations, - size*HeapWordSize); - } - - if (VerifyBeforeGC && i >= VerifyGCLevel && - total_collections() >= VerifyGCStartAt) { - HandleMark hm; // Discard invalid handles created during verification - if (!prepared_for_verification) { - prepare_for_verify(); - prepared_for_verification = true; - } - Universe::verify(" VerifyBeforeGC:"); - } - COMPILER2_PRESENT(DerivedPointerTable::clear()); - - if (!must_restore_marks_for_biased_locking && - _gens[i]->performs_in_place_marking()) { - // We perform this mark word preservation work lazily - // because it's only at this point that we know whether we - // absolutely have to do it; we want to avoid doing it for - // scavenge-only collections where it's unnecessary - must_restore_marks_for_biased_locking = true; - BiasedLocking::preserve_marks(); - } - - // Do collection work - { - // Note on ref discovery: For what appear to be historical reasons, - // GCH enables and disabled (by enqueing) refs discovery. - // In the future this should be moved into the generation's - // collect method so that ref discovery and enqueueing concerns - // are local to a generation. The collect method could return - // an appropriate indication in the case that notification on - // the ref lock was needed. This will make the treatment of - // weak refs more uniform (and indeed remove such concerns - // from GCH). XXX - - HandleMark hm; // Discard invalid handles created during gc - save_marks(); // save marks for all gens - // We want to discover references, but not process them yet. 
- // This mode is disabled in process_discovered_references if the - // generation does some collection work, or in - // enqueue_discovered_references if the generation returns - // without doing any work. - ReferenceProcessor* rp = _gens[i]->ref_processor(); - // If the discovery of ("weak") refs in this generation is - // atomic wrt other collectors in this configuration, we - // are guaranteed to have empty discovered ref lists. - if (rp->discovery_is_atomic()) { - rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/); - rp->setup_policy(do_clear_all_soft_refs); - } else { - // collect() below will enable discovery as appropriate - } - _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab); - if (!rp->enqueuing_is_done()) { - rp->enqueue_discovered_references(); - } else { - rp->set_enqueuing_is_done(false); - } - rp->verify_no_references_recorded(); - } - max_level_collected = i; - - // Determine if allocation request was met. - if (size > 0) { - if (!is_tlab || _gens[i]->supports_tlab_allocation()) { - if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) { - size = 0; - } - } - } - - COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); - - _gens[i]->stat_record()->accumulated_time.stop(); - - update_gc_stats(i, full); - - if (VerifyAfterGC && i >= VerifyGCLevel && - total_collections() >= VerifyGCStartAt) { - HandleMark hm; // Discard invalid handles created during verification - Universe::verify(" VerifyAfterGC:"); - } - - if (PrintGCDetails) { - gclog_or_tty->print(":"); - _gens[i]->print_heap_change(prev_used); + if (max_generation == Generation::Old && _old_gen->should_collect(full, size, is_tlab)) { + if (!complete) { + // The full_collections increment was missed above. + increment_total_full_collections(); + } + pre_full_gc_dump(NULL); // do any pre full gc dumps + if (run_verification && VerifyGCLevel <= 1 && VerifyBeforeGC) { + if (!prepared_for_verification) { + prepare_for_verify(); } } + collect_generation(_old_gen, full, size, is_tlab, run_verification && VerifyGCLevel <= 1, do_clear_all_soft_refs); + old_collected = true; } // Update "complete" boolean wrt what actually transpired -- // for instance, a promotion failure could have led to // a whole heap collection. - complete = complete || (max_level_collected == n_gens() - 1); + complete = complete || old_collected; if (complete) { // We did a "major" collection // FIXME: See comment at pre_full_gc_dump call @@ -539,10 +512,11 @@ } } - for (int j = max_level_collected; j >= 0; j -= 1) { - // Adjust generation sizes. - _gens[j]->compute_new_size(); + // Adjust generation sizes. + if (old_collected) { + _old_gen->compute_new_size(); } + _young_gen->compute_new_size(); if (complete) { // Delete metaspaces for unloaded class loaders and clean up loader_data graph @@ -580,7 +554,7 @@ } void GenCollectedHeap:: -gen_process_roots(int level, +gen_process_roots(Generation::Type type, bool younger_gens_as_roots, bool activate_scope, SharedHeap::ScanningOption so, @@ -599,18 +573,18 @@ if (younger_gens_as_roots) { if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) { - for (int i = 0; i < level; i++) { - not_older_gens->set_generation(_gens[i]); - _gens[i]->oop_iterate(not_older_gens); + if (type == Generation::Old) { + not_older_gens->set_generation(_young_gen); + _young_gen->oop_iterate(not_older_gens); } not_older_gens->reset_generation(); } } // When collection is parallel, all threads get to cooperate to do - // older-gen scanning. 
- for (int i = level+1; i < _n_gens; i++) { - older_gens->set_generation(_gens[i]); - rem_set()->younger_refs_iterate(_gens[i], older_gens); + // old generation scanning. + if (type == Generation::Young) { + older_gens->set_generation(_old_gen); + rem_set()->younger_refs_iterate(_old_gen, older_gens); older_gens->reset_generation(); } @@ -618,7 +592,7 @@ } void GenCollectedHeap:: -gen_process_roots(int level, +gen_process_roots(Generation::Type type, bool younger_gens_as_roots, bool activate_scope, SharedHeap::ScanningOption so, @@ -630,7 +604,7 @@ const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots; bool is_moving_collection = false; - if (level == 0 || is_adjust_phase) { + if (type == Generation::Young || is_adjust_phase) { // young collections are always moving is_moving_collection = true; } @@ -638,7 +612,7 @@ MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection); CodeBlobClosure* code_closure = &mark_code_closure; - gen_process_roots(level, + gen_process_roots(type, younger_gens_as_roots, activate_scope, so, not_older_gens, only_strong_roots ? NULL : not_older_gens, @@ -651,19 +625,20 @@ void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) { SharedHeap::process_weak_roots(root_closure); // "Local" "weak" refs - for (int i = 0; i < _n_gens; i++) { - _gens[i]->ref_processor()->weak_oops_do(root_closure); - } + _young_gen->ref_processor()->weak_oops_do(root_closure); + _old_gen->ref_processor()->weak_oops_do(root_closure); } #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \ void GenCollectedHeap:: \ -oop_since_save_marks_iterate(int level, \ +oop_since_save_marks_iterate(Generation::Type gen, \ OopClosureType* cur, \ OopClosureType* older) { \ - _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur); \ - for (int i = level+1; i < n_gens(); i++) { \ - _gens[i]->oop_since_save_marks_iterate##nv_suffix(older); \ + if (gen == Generation::Young) { \ + _young_gen->oop_since_save_marks_iterate##nv_suffix(cur); \ + _old_gen->oop_since_save_marks_iterate##nv_suffix(older); \ + } else { \ + _old_gen->oop_since_save_marks_iterate##nv_suffix(cur); \ } \ } @@ -671,23 +646,21 @@ #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN -bool GenCollectedHeap::no_allocs_since_save_marks(int level) { - for (int i = level; i < _n_gens; i++) { - if (!_gens[i]->no_allocs_since_save_marks()) return false; - } - return true; +bool GenCollectedHeap::no_allocs_since_save_marks(Generation::Type start_gen) { + return (start_gen == Generation::Old || _young_gen->no_allocs_since_save_marks()) && + _old_gen->no_allocs_since_save_marks(); } bool GenCollectedHeap::supports_inline_contig_alloc() const { - return _gens[0]->supports_inline_contig_alloc(); + return _young_gen->supports_inline_contig_alloc(); } HeapWord** GenCollectedHeap::top_addr() const { - return _gens[0]->top_addr(); + return _young_gen->top_addr(); } HeapWord** GenCollectedHeap::end_addr() const { - return _gens[0]->end_addr(); + return _young_gen->end_addr(); } // public collection interfaces @@ -702,47 +675,47 @@ #endif // INCLUDE_ALL_GCS } else if (cause == GCCause::_wb_young_gc) { // minor collection for WhiteBox API - collect(cause, 0); + collect(cause, Generation::Young); } else { #ifdef ASSERT - if (cause == GCCause::_scavenge_alot) { - // minor collection only - collect(cause, 0); - } else { - // Stop-the-world full collection - collect(cause, n_gens() - 1); - } + if (cause == GCCause::_scavenge_alot) { + // minor collection only + collect(cause, Generation::Young); + } else { + // 
Stop-the-world full collection + collect(cause, Generation::Old); + } #else // Stop-the-world full collection - collect(cause, n_gens() - 1); + collect(cause, Generation::Old); #endif } } -void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) { +void GenCollectedHeap::collect(GCCause::Cause cause, Generation::Type max_gen) { // The caller doesn't have the Heap_lock assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); MutexLocker ml(Heap_lock); - collect_locked(cause, max_level); + collect_locked(cause, max_gen); } void GenCollectedHeap::collect_locked(GCCause::Cause cause) { // The caller has the Heap_lock assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock"); - collect_locked(cause, n_gens() - 1); + collect_locked(cause, Generation::Old); } // this is the private collection interface // The Heap_lock is expected to be held on entry. -void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) { +void GenCollectedHeap::collect_locked(GCCause::Cause cause, Generation::Type max_generation) { // Read the GC count while holding the Heap_lock unsigned int gc_count_before = total_collections(); unsigned int full_gc_count_before = total_full_collections(); { MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back VM_GenCollectFull op(gc_count_before, full_gc_count_before, - cause, max_level); + cause, max_generation); VMThread::execute(&op); } } @@ -750,12 +723,12 @@ #if INCLUDE_ALL_GCS bool GenCollectedHeap::create_cms_collector() { - assert(_gens[1]->kind() == Generation::ConcurrentMarkSweep, + assert(_old_gen->kind() == Generation::ConcurrentMarkSweep, "Unexpected generation kinds"); // Skip two header words in the block content verification NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();) CMSCollector* collector = new CMSCollector( - (ConcurrentMarkSweepGeneration*)_gens[1], + (ConcurrentMarkSweepGeneration*)_old_gen, _rem_set->as_CardTableRS(), (ConcurrentMarkSweepPolicy*) collector_policy()); @@ -785,28 +758,28 @@ #endif // INCLUDE_ALL_GCS void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) { - do_full_collection(clear_all_soft_refs, _n_gens - 1); + do_full_collection(clear_all_soft_refs, Generation::Old); } void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs, - int max_level) { - int local_max_level; + Generation::Type max_gen) { + Generation::Type local_max_gen; if (!incremental_collection_will_fail(false /* don't consult_young */) && gc_cause() == GCCause::_gc_locker) { - local_max_level = 0; + local_max_gen = Generation::Young; } else { - local_max_level = max_level; + local_max_gen = max_gen; } do_collection(true /* full */, clear_all_soft_refs /* clear_all_soft_refs */, 0 /* size */, false /* is_tlab */, - local_max_level /* max_level */); + local_max_gen /* max_gen */); // Hack XXX FIX ME !!! 
// A scavenge may not have been attempted, or may have // been attempted and failed, because the old gen was too full - if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker && + if (local_max_gen == Generation::Young && gc_cause() == GCCause::_gc_locker && incremental_collection_will_fail(false /* don't consult_young */)) { if (PrintGCDetails) { gclog_or_tty->print_cr("GC locker: Trying a full collection " @@ -817,13 +790,13 @@ clear_all_soft_refs /* clear_all_soft_refs */, 0 /* size */, false /* is_tlab */, - n_gens() - 1 /* max_level */); + Generation::Old /* max_gen */); } } bool GenCollectedHeap::is_in_young(oop p) { - bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start(); - assert(result == _gens[0]->is_in_reserved(p), + bool result = ((HeapWord*)p) < _old_gen->reserved().start(); + assert(result == _young_gen->is_in_reserved(p), err_msg("incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p))); return result; } @@ -843,8 +816,8 @@ #endif // This might be sped up with a cache of the last generation that // answered yes. - for (int i = 0; i < _n_gens; i++) { - if (_gens[i]->is_in(p)) return true; + if (_young_gen->is_in(p) || _old_gen->is_in(p)) { + return true; } // Otherwise... return false; @@ -856,114 +829,97 @@ bool GenCollectedHeap::is_in_partial_collection(const void* p) { assert(is_in_reserved(p) || p == NULL, "Does not work if address is non-null and outside of the heap"); - return p < _gens[_n_gens - 2]->reserved().end() && p != NULL; + return p < _young_gen->reserved().end() && p != NULL; } #endif void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) { - for (int i = 0; i < _n_gens; i++) { - _gens[i]->oop_iterate(cl); - } + _young_gen->oop_iterate(cl); + _old_gen->oop_iterate(cl); } void GenCollectedHeap::object_iterate(ObjectClosure* cl) { - for (int i = 0; i < _n_gens; i++) { - _gens[i]->object_iterate(cl); - } + _young_gen->object_iterate(cl); + _old_gen->object_iterate(cl); } void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) { - for (int i = 0; i < _n_gens; i++) { - _gens[i]->safe_object_iterate(cl); - } + _young_gen->safe_object_iterate(cl); + _old_gen->safe_object_iterate(cl); } Space* GenCollectedHeap::space_containing(const void* addr) const { - for (int i = 0; i < _n_gens; i++) { - Space* res = _gens[i]->space_containing(addr); - if (res != NULL) return res; + Space* res = _young_gen->space_containing(addr); + if (res != NULL) { + return res; } - // Otherwise... 
- assert(false, "Could not find containing space"); - return NULL; + res = _old_gen->space_containing(addr); + assert(res != NULL, "Could not find containing space"); + return res; } - HeapWord* GenCollectedHeap::block_start(const void* addr) const { assert(is_in_reserved(addr), "block_start of address outside of heap"); - for (int i = 0; i < _n_gens; i++) { - if (_gens[i]->is_in_reserved(addr)) { - assert(_gens[i]->is_in(addr), - "addr should be in allocated part of generation"); - return _gens[i]->block_start(addr); - } + if (_young_gen->is_in_reserved(addr)) { + assert(_young_gen->is_in(addr), "addr should be in allocated part of generation"); + return _young_gen->block_start(addr); } - assert(false, "Some generation should contain the address"); - return NULL; + + assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address"); + assert(_old_gen->is_in(addr), "addr should be in allocated part of generation"); + return _old_gen->block_start(addr); } size_t GenCollectedHeap::block_size(const HeapWord* addr) const { assert(is_in_reserved(addr), "block_size of address outside of heap"); - for (int i = 0; i < _n_gens; i++) { - if (_gens[i]->is_in_reserved(addr)) { - assert(_gens[i]->is_in(addr), - "addr should be in allocated part of generation"); - return _gens[i]->block_size(addr); - } + if (_young_gen->is_in_reserved(addr)) { + assert(_young_gen->is_in(addr), "addr should be in allocated part of generation"); + return _young_gen->block_size(addr); } - assert(false, "Some generation should contain the address"); - return 0; + + assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address"); + assert(_old_gen->is_in(addr), "addr should be in allocated part of generation"); + return _old_gen->block_size(addr); } bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const { assert(is_in_reserved(addr), "block_is_obj of address outside of heap"); assert(block_start(addr) == addr, "addr must be a block start"); - for (int i = 0; i < _n_gens; i++) { - if (_gens[i]->is_in_reserved(addr)) { - return _gens[i]->block_is_obj(addr); - } + if (_young_gen->is_in_reserved(addr)) { + return _young_gen->block_is_obj(addr); } - assert(false, "Some generation should contain the address"); - return false; + + assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address"); + return _old_gen->block_is_obj(addr); } bool GenCollectedHeap::supports_tlab_allocation() const { - for (int i = 0; i < _n_gens; i += 1) { - if (_gens[i]->supports_tlab_allocation()) { - return true; - } - } - return false; + assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!"); + return _young_gen->supports_tlab_allocation(); } size_t GenCollectedHeap::tlab_capacity(Thread* thr) const { - size_t result = 0; - for (int i = 0; i < _n_gens; i += 1) { - if (_gens[i]->supports_tlab_allocation()) { - result += _gens[i]->tlab_capacity(); - } + assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!"); + if (_young_gen->supports_tlab_allocation()) { + return _young_gen->tlab_capacity(); } - return result; + return 0; } size_t GenCollectedHeap::tlab_used(Thread* thr) const { - size_t result = 0; - for (int i = 0; i < _n_gens; i += 1) { - if (_gens[i]->supports_tlab_allocation()) { - result += _gens[i]->tlab_used(); - } + assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!"); + if (_young_gen->supports_tlab_allocation()) { + return _young_gen->tlab_used(); } - return result; + return 0; } size_t 
GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const { - size_t result = 0; - for (int i = 0; i < _n_gens; i += 1) { - if (_gens[i]->supports_tlab_allocation()) { - result += _gens[i]->unsafe_max_tlab_alloc(); - } + assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!"); + if (_young_gen->supports_tlab_allocation()) { + return _young_gen->unsafe_max_tlab_alloc(); } - return result; + return 0; } HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) { @@ -1012,17 +968,15 @@ ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor, size_t max_alloc_words) { ScratchBlock* res = NULL; - for (int i = 0; i < _n_gens; i++) { - _gens[i]->contribute_scratch(res, requestor, max_alloc_words); - } + _young_gen->contribute_scratch(res, requestor, max_alloc_words); + _old_gen->contribute_scratch(res, requestor, max_alloc_words); sort_scratch_list(res); return res; } void GenCollectedHeap::release_scratch() { - for (int i = 0; i < _n_gens; i++) { - _gens[i]->reset_scratch(); - } + _young_gen->reset_scratch(); + _old_gen->reset_scratch(); } class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure { @@ -1037,39 +991,29 @@ generation_iterate(&blk, false); } - void GenCollectedHeap::generation_iterate(GenClosure* cl, bool old_to_young) { if (old_to_young) { - for (int i = _n_gens-1; i >= 0; i--) { - cl->do_generation(_gens[i]); - } + cl->do_generation(_old_gen); + cl->do_generation(_young_gen); } else { - for (int i = 0; i < _n_gens; i++) { - cl->do_generation(_gens[i]); - } + cl->do_generation(_young_gen); + cl->do_generation(_old_gen); } } void GenCollectedHeap::space_iterate(SpaceClosure* cl) { - for (int i = 0; i < _n_gens; i++) { - _gens[i]->space_iterate(cl, true); - } + _young_gen->space_iterate(cl, true); + _old_gen->space_iterate(cl, true); } bool GenCollectedHeap::is_maximal_no_gc() const { - for (int i = 0; i < _n_gens; i++) { - if (!_gens[i]->is_maximal_no_gc()) { - return false; - } - } - return true; + return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc(); } void GenCollectedHeap::save_marks() { - for (int i = 0; i < _n_gens; i++) { - _gens[i]->save_marks(); - } + _young_gen->save_marks(); + _old_gen->save_marks(); } GenCollectedHeap* GenCollectedHeap::heap() { @@ -1078,30 +1022,26 @@ return _gch; } - void GenCollectedHeap::prepare_for_compaction() { - guarantee(_n_gens = 2, "Wrong number of generations"); - Generation* old_gen = _gens[1]; // Start by compacting into same gen. 
- CompactPoint cp(old_gen); - old_gen->prepare_for_compaction(&cp); - Generation* young_gen = _gens[0]; - young_gen->prepare_for_compaction(&cp); -} - -GCStats* GenCollectedHeap::gc_stats(int level) const { - return _gens[level]->gc_stats(); + CompactPoint cp(_old_gen); + _old_gen->prepare_for_compaction(&cp); + _young_gen->prepare_for_compaction(&cp); +} + +GCStats* GenCollectedHeap::gc_stats(Generation* gen) const { + return gen->gc_stats(); } void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) { - for (int i = _n_gens-1; i >= 0; i--) { - Generation* g = _gens[i]; - if (!silent) { - gclog_or_tty->print("%s", g->name()); - gclog_or_tty->print(" "); - } - g->verify(); + if (!silent) { + gclog_or_tty->print("%s", _old_gen->name()); + gclog_or_tty->print(" "); } + _old_gen->verify(); + + if (!silent) { + gclog_or_tty->print("%s", _young_gen->name()); + gclog_or_tty->print(" "); + } + _young_gen->verify(); + if (!silent) { gclog_or_tty->print("remset "); } @@ -1109,9 +1049,8 @@ } void GenCollectedHeap::print_on(outputStream* st) const { - for (int i = 0; i < _n_gens; i++) { - _gens[i]->print_on(st); - } + _young_gen->print_on(st); + _old_gen->print_on(st); MetaspaceAux::print_on(st); } @@ -1150,10 +1089,10 @@ void GenCollectedHeap::print_tracing_info() const { if (TraceYoungGenTime) { - get_gen(0)->print_summary_info(); + _young_gen->print_summary_info(); } if (TraceOldGenTime) { - get_gen(1)->print_summary_info(); + _old_gen->print_summary_info(); } } @@ -1259,7 +1198,7 @@ oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen, oop obj, size_t obj_size) { - guarantee(old_gen->level() == 1, "We only get here with an old generation"); + guarantee(old_gen == _old_gen, "We only get here with an old generation"); assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); HeapWord* result = NULL; --- old/src/share/vm/memory/genCollectedHeap.hpp 2014-10-17 16:28:43.000000000 +0200 +++ new/src/share/vm/memory/genCollectedHeap.hpp 2014-10-17 16:28:43.000000000 +0200 @@ -33,7 +33,7 @@ class SubTasksDone; // A "GenCollectedHeap" is a SharedHeap that uses generational -// collection. It is represented with a sequence of Generation's. +// collection. It has two generations, young and old. class GenCollectedHeap : public SharedHeap { friend class GenCollectorPolicy; friend class Generation; @@ -51,10 +51,6 @@ friend class GCCauseSetter; friend class VMStructs; public: - enum SomeConstants { - max_gens = 10 - }; - friend class VM_PopulateDumpSharedSpace; protected: @@ -62,9 +58,8 @@ static GenCollectedHeap* _gch; private: - int _n_gens; - Generation* _gens[max_gens]; - GenerationSpec** _gen_specs; + Generation* _young_gen; + Generation* _old_gen; // The generational collector policy. GenCollectorPolicy* _gen_policy; @@ -82,6 +77,9 @@ SubTasksDone* _gen_process_roots_tasks; SubTasksDone* gen_process_roots_tasks() { return _gen_process_roots_tasks; } + void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab, + bool run_verification, bool clear_soft_refs); + // In block contents verification, the number of header words to skip NOT_PRODUCT(static size_t _skip_header_HeapWords;) @@ -93,11 +91,11 @@ // Helper function for two callbacks below. - // Considers collection of the first max_level+1 generations. + // Considers collection of generations up to and including max_generation. - void do_collection(bool full, - bool clear_all_soft_refs, - size_t size, - bool is_tlab, - int max_level); + void do_collection(bool full, + bool clear_all_soft_refs, + size_t size, + bool is_tlab, + Generation::Type max_generation); // Callback from VM_GenCollectForAllocation operation.
// This function does everything necessary/possible to satisfy an @@ -108,7 +106,7 @@ // Callback from VM_GenCollectFull operation. // Perform a full collection of the first max_level+1 generations. virtual void do_full_collection(bool clear_all_soft_refs); - void do_full_collection(bool clear_all_soft_refs, int max_level); + void do_full_collection(bool clear_all_soft_refs, Generation::Type max_gen); // Does the "cause" of GC indicate that // we absolutely __must__ clear soft refs? @@ -117,10 +115,11 @@ public: GenCollectedHeap(GenCollectorPolicy *policy); - GCStats* gc_stats(int level) const; + GCStats* gc_stats(Generation* gen) const; // Returns JNI_OK on success virtual jint initialize(); + char* allocate(size_t alignment, size_t* _total_reserved, int* _n_covered_regions, ReservedSpace* heap_rs); @@ -135,8 +134,12 @@ return CollectedHeap::GenCollectedHeap; } + Generation* young_gen() const { return _young_gen; } + Generation* old_gen() const { return _old_gen; } + // The generational collector policy. GenCollectorPolicy* gen_policy() const { return _gen_policy; } + virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); } // Adaptive size policy @@ -152,13 +155,12 @@ size_t capacity() const; size_t used() const; - // Save the "used_region" for generations level and lower. - void save_used_regions(int level); + // Save the "used_region" for both generations. + void save_used_regions(); size_t max_capacity() const; - HeapWord* mem_allocate(size_t size, - bool* gc_overhead_limit_was_exceeded); + HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded); // We may support a shared contiguous allocation area, if the youngest // generation does. @@ -177,9 +179,9 @@ // The same as above but assume that the caller holds the Heap_lock. void collect_locked(GCCause::Cause cause); - // Perform a full collection of the first max_level+1 generations. + // Perform a full collection of generations up to and including max_gen. // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry. - void collect(GCCause::Cause cause, int max_level); + void collect(GCCause::Cause cause, Generation::Type max_gen); // Returns "TRUE" iff "p" points into the committed areas of the heap. // The methods is_in(), is_in_closed_subset() and is_in_youngest() may @@ -306,20 +308,15 @@ // Update above counter, as appropriate, at the end of a concurrent GC cycle unsigned int update_full_collections_completed(unsigned int count); - // Update "time of last gc" for all constituent generations - // to "now". + // Update "time of last gc" for all generations to "now". void update_time_of_last_gc(jlong now) { - for (int i = 0; i < _n_gens; i++) { - _gens[i]->update_time_of_last_gc(now); - } + _young_gen->update_time_of_last_gc(now); + _old_gen->update_time_of_last_gc(now); } // Update the gc statistics for each generation. - // "level" is the level of the latest collection. - void update_gc_stats(int current_level, bool full) { - for (int i = 0; i < _n_gens; i++) { - _gens[i]->update_gc_stats(current_level, full); - } + void update_gc_stats(Generation* current_generation, bool full) { + _old_gen->update_gc_stats(current_generation, full); } // Override. @@ -361,30 +358,6 @@ // collection. virtual bool is_maximal_no_gc() const; - // Return the generation before "gen". - Generation* prev_gen(Generation* gen) const { - int l = gen->level(); - guarantee(l > 0, "Out of bounds"); - return _gens[l-1]; - } - - // Return the generation after "gen". 
- Generation* next_gen(Generation* gen) const { - int l = gen->level() + 1; - guarantee(l < _n_gens, "Out of bounds"); - return _gens[l]; - } - - Generation* get_gen(int i) const { - guarantee(i >= 0 && i < _n_gens, "Out of bounds"); - return _gens[i]; - } - - int n_gens() const { - assert(_n_gens == gen_policy()->number_of_generations(), "Sanity"); - return _n_gens; - } - // Convenience function to be used in situations where the heap type can be // asserted to be this type. static GenCollectedHeap* heap(); @@ -392,8 +365,8 @@ void set_par_threads(uint t); // Invoke the "do_oop" method of one of the closures "not_older_gens" - // or "older_gens" on root locations for the generation at - // "level". (The "older_gens" closure is used for scanning references + // or "older_gens" on root locations for the generations depending on + // the type. (The "older_gens" closure is used for scanning references // from older generations; "not_older_gens" is used everywhere else.) // If "younger_gens_as_roots" is false, younger generations are // not scanned as roots; in this case, the caller must be arranging to // the closure is applied to: // "SO_None" does none; private: - void gen_process_roots(int level, + void gen_process_roots(Generation::Type type, bool younger_gens_as_roots, bool activate_scope, SharedHeap::ScanningOption so, @@ -419,7 +392,7 @@ static const bool StrongAndWeakRoots = false; static const bool StrongRootsOnly = true; - void gen_process_roots(int level, + void gen_process_roots(Generation::Type type, bool younger_gens_as_roots, bool activate_scope, SharedHeap::ScanningOption so, @@ -444,7 +417,7 @@ - // applied to references in the generation at "level", and the "older" + // applied to references in the given starting generation, and the "older" // closure to older generations. #define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \ - void oop_since_save_marks_iterate(int level, \ + void oop_since_save_marks_iterate(Generation::Type start_gen, \ OopClosureType* cur, \ OopClosureType* older); @@ -452,23 +425,19 @@ #undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL - // Returns "true" iff no allocations have occurred in any generation at - // "level" or above since the last + // Returns "true" iff no allocations have occurred in start_gen or any + // older generation since the last // call to "save_marks". - bool no_allocs_since_save_marks(int level); + bool no_allocs_since_save_marks(Generation::Type start_gen); // Returns true if an incremental collection is likely to fail. // We optionally consult the young gen, if asked to do so; // otherwise we base our answer on whether the previous incremental // collection attempt failed with no corrective action as of yet. bool incremental_collection_will_fail(bool consult_young) { - // Assumes a 2-generation system; the first disjunct remembers if an - // incremental collection failed, even when we thought (second disjunct) - // that it would not. - assert(heap()->collector_policy()->is_generation_policy(), - "the following definition may not be suitable for an n(>2)-generation system"); + // The first disjunct remembers if an incremental collection failed, even + // when we thought (second disjunct) that it would not. return incremental_collection_failed() || - (consult_young && !get_gen(0)->collection_attempt_is_safe()); + (consult_young && !_young_gen->collection_attempt_is_safe()); } // If a generation bails out of an incremental collection, @@ -506,10 +475,10 @@ // iterating over spaces. void prepare_for_compaction(); - // Perform a full collection of the first max_level+1 generations.
+ // Perform a full collection of the generations up to and including max_gen. // This is the low level interface used by the public versions of // collect() and collect_locked(). Caller holds the Heap_lock on entry. - void collect_locked(GCCause::Cause cause, int max_level); + void collect_locked(GCCause::Cause cause, Generation::Type max_gen); // Returns success or failure. bool create_cms_collector(); --- old/src/share/vm/memory/genMarkSweep.cpp 2014-10-17 16:28:44.000000000 +0200 +++ new/src/share/vm/memory/genMarkSweep.cpp 2014-10-17 16:28:44.000000000 +0200 @@ -37,6 +37,7 @@ #include "memory/genCollectedHeap.hpp" #include "memory/genMarkSweep.hpp" #include "memory/genOopClosures.inline.hpp" +#include "memory/generation.hpp" #include "memory/generation.inline.hpp" #include "memory/modRefBarrierSet.hpp" #include "memory/referencePolicy.hpp" @@ -52,8 +53,7 @@ #include "utilities/copy.hpp" #include "utilities/events.hpp" -void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool clear_all_softrefs) { - guarantee(level == 1, "We always collect both old and young."); +void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_softrefs) { assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); GenCollectedHeap* gch = GenCollectedHeap::heap(); @@ -86,11 +86,11 @@ // Capture used regions for each generation that will be // subject to collection, so that card table adjustments can // be made intelligently (see clear / invalidate further below). - gch->save_used_regions(level); + gch->save_used_regions(); allocate_stacks(); - mark_sweep_phase1(level, clear_all_softrefs); + mark_sweep_phase1(clear_all_softrefs); mark_sweep_phase2(); @@ -98,7 +98,7 @@ COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity")); COMPILER2_PRESENT(DerivedPointerTable::set_active(false)); - mark_sweep_phase3(level); + mark_sweep_phase3(); mark_sweep_phase4(); @@ -110,19 +110,14 @@ deallocate_stacks(); - // If compaction completely evacuated all generations younger than this - // one, then we can clear the card table. Otherwise, we must invalidate - // it (consider all cards dirty). In the future, we might consider doing - // compaction within generations only, and doing card-table sliding. - bool all_empty = true; - for (int i = 0; all_empty && i < level; i++) { - Generation* g = gch->get_gen(i); - all_empty = all_empty && gch->get_gen(i)->used() == 0; - } + // If compaction completely evacuated the young generation we can clear + // the card table. Otherwise, we must invalidate it (consider all cards dirty). + // In the future, we might consider doing compaction within generations only, + // and doing card-table sliding. GenRemSet* rs = gch->rem_set(); - Generation* old_gen = gch->get_gen(level); + Generation* old_gen = gch->old_gen(); // Clear/invalidate below make use of the "prev_used_regions" saved earlier. - if (all_empty) { + if (gch->young_gen()->used() == 0) { // We've evacuated all generations below us. rs->clear_into_younger(old_gen); } else { @@ -160,7 +155,7 @@ GenCollectedHeap* gch = GenCollectedHeap::heap(); // Scratch request on behalf of oldest generation; will do no // allocation. - ScratchBlock* scratch = gch->gather_scratch(gch->_gens[gch->_n_gens-1], 0); + ScratchBlock* scratch = gch->gather_scratch(gch->old_gen(), 0); // $$$ To cut a corner, we'll only use the first scratch block, and then // revert to malloc. 
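The gather_scratch change above shows the convention that replaces _gens[_n_gens-1]: the oldest generation is always GenCollectedHeap::heap()->old_gen(). A minimal caller sketch under that assumption; request_full_gc_scratch is a hypothetical name, not part of this patch:

  #include "memory/genCollectedHeap.hpp"

  // Ask for scratch space on behalf of the oldest generation. With the
  // fixed two-generation layout this is simply the heap's old generation.
  // Passing max_alloc_words == 0 means the request itself allocates nothing.
  static ScratchBlock* request_full_gc_scratch() {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    return gch->gather_scratch(gch->old_gen(), 0);
  }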
@@ -188,8 +183,7 @@ _objarray_stack.clear(true); } -void GenMarkSweep::mark_sweep_phase1(int level, - bool clear_all_softrefs) { +void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) { // Recursively traverse all live objects and mark them GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace(" 1"); @@ -200,12 +194,12 @@ // use OopsInGenClosure constructor which takes a generation, // as the Universe has not been created when the static constructors // are run. - follow_root_closure.set_orig_generation(gch->get_gen(level)); + follow_root_closure.set_orig_generation(gch->old_gen()); // Need new claim bits before marking starts. ClassLoaderDataGraph::clear_claimed_marks(); - gch->gen_process_roots(level, + gch->gen_process_roots(Generation::Old, false, // Younger gens are not roots. true, // activate StrongRootsScope SharedHeap::SO_None, @@ -274,7 +268,7 @@ } }; -void GenMarkSweep::mark_sweep_phase3(int level) { +void GenMarkSweep::mark_sweep_phase3() { GenCollectedHeap* gch = GenCollectedHeap::heap(); // Adjust the pointers to reflect the new locations @@ -288,9 +282,9 @@ // use OopsInGenClosure constructor which takes a generation, // as the Universe has not been created when the static constructors // are run. - adjust_pointer_closure.set_orig_generation(gch->get_gen(level)); + adjust_pointer_closure.set_orig_generation(gch->old_gen()); - gch->gen_process_roots(level, + gch->gen_process_roots(Generation::Old, false, // Younger gens are not roots. true, // activate StrongRootsScope SharedHeap::SO_AllCodeCache, --- old/src/share/vm/memory/genMarkSweep.hpp 2014-10-17 16:28:45.000000000 +0200 +++ new/src/share/vm/memory/genMarkSweep.hpp 2014-10-17 16:28:45.000000000 +0200 @@ -31,17 +31,16 @@ friend class VM_MarkSweep; friend class G1MarkSweep; public: - static void invoke_at_safepoint(int level, ReferenceProcessor* rp, - bool clear_all_softrefs); + static void invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_softrefs); private: // Mark live objects - static void mark_sweep_phase1(int level, bool clear_all_softrefs); + static void mark_sweep_phase1(bool clear_all_softrefs); // Calculate new addresses static void mark_sweep_phase2(); // Update pointers - static void mark_sweep_phase3(int level); + static void mark_sweep_phase3(); // Move objects to new positions static void mark_sweep_phase4(); --- old/src/share/vm/memory/generation.cpp 2014-10-17 16:28:45.000000000 +0200 +++ new/src/share/vm/memory/generation.cpp 2014-10-17 16:28:45.000000000 +0200 @@ -45,8 +45,7 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC -Generation::Generation(ReservedSpace rs, size_t initial_size, int level) : - _level(level), +Generation::Generation(ReservedSpace rs, size_t initial_size) : _ref_processor(NULL) { if (!_virtual_space.initialize(rs, initial_size)) { vm_exit_during_initialization("Could not reserve enough space for " @@ -64,8 +63,10 @@ GenerationSpec* Generation::spec() { GenCollectedHeap* gch = GenCollectedHeap::heap(); - assert(0 <= level() && level() < gch->_n_gens, "Bad gen level"); - return gch->_gen_specs[level()]; + if (this == gch->young_gen()) { + return gch->gen_policy()->young_gen_spec(); + } + return gch->gen_policy()->old_gen_spec(); } size_t Generation::max_capacity() const { @@ -114,9 +115,17 @@ void Generation::print_summary_info_on(outputStream* st) { StatRecord* sr = stat_record(); double time = sr->accumulated_time.seconds(); + // I didn't want to change the logging when removing the level concept, + // but I guess this logging could say 
young/old or something instead of 0/1. + int level; + if (this == GenCollectedHeap::heap()->young_gen()) { + level = 0; + } else { + level = 1; + } st->print_cr("[Accumulated GC generation %d time %3.7f secs, " "%d GC's, avg GC time %3.7f]", - level(), time, sr->invocations, + level, time, sr->invocations, sr->invocations > 0 ? time / sr->invocations : 0.0); } @@ -159,26 +168,14 @@ return (DefNewGeneration*) this; } -Generation* Generation::next_gen() const { - GenCollectedHeap* gch = GenCollectedHeap::heap(); - int next = level() + 1; - if (next < gch->_n_gens) { - return gch->_gens[next]; - } else { - return NULL; - } -} - size_t Generation::max_contiguous_available() const { // The largest number of contiguous free words in this or any higher generation. - size_t max = 0; - for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) { - size_t avail = gen->contiguous_available(); - if (avail > max) { - max = avail; - } + size_t avail = contiguous_available(); + size_t old_avail = 0; + if (this == GenCollectedHeap::heap()->young_gen()) { + old_avail = GenCollectedHeap::heap()->old_gen()->contiguous_available(); } - return max; + return MAX2(avail, old_avail); } bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const { @@ -377,9 +374,8 @@ } CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size, - int level, GenRemSet* remset) : - Generation(rs, initial_byte_size, level), _rs(remset), + Generation(rs, initial_byte_size), _rs(remset), _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(), _used_at_prologue() { @@ -635,7 +631,7 @@ SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer(); gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start()); - GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs); + GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs); gc_timer->register_gc_end(); --- old/src/share/vm/memory/generation.hpp 2014-10-17 16:28:46.000000000 +0200 +++ new/src/share/vm/memory/generation.hpp 2014-10-17 16:28:46.000000000 +0200 @@ -84,7 +84,6 @@ // first two fields are word-sized.) }; - class Generation: public CHeapObj { friend class VMStructs; private: @@ -102,9 +101,6 @@ // Memory area reserved for generation VirtualSpace _virtual_space; - // Level in the generation hierarchy. - int _level; - // ("Weak") Reference processing support ReferenceProcessor* _ref_processor; @@ -114,12 +110,8 @@ // Statistics for garbage collection GCStats* _gc_stats; - // Returns the next generation in the configuration, or else NULL if this - // is the highest generation. - Generation* next_gen() const; - // Initialize the generation. - Generation(ReservedSpace rs, size_t initial_byte_size, int level); + Generation(ReservedSpace rs, size_t initial_byte_size); // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in // "sp" that point into younger generations. @@ -138,6 +130,11 @@ Other }; + enum Type { + Young, + Old + }; + enum SomePublicConstants { // Generations are GenGrain-aligned and have size that are multiples of // GenGrain. @@ -438,7 +435,7 @@ // generation can decide to gather the amount of promoted data // if the collection of the younger generations has completed. 
--- old/src/share/vm/memory/generationSpec.cpp 2014-10-17 16:28:47.000000000 +0200
+++ new/src/share/vm/memory/generationSpec.cpp 2014-10-17 16:28:47.000000000 +0200
@@ -36,18 +36,17 @@
 #include "gc_implementation/parNew/parNewGeneration.hpp"
 #endif // INCLUDE_ALL_GCS
 
-Generation* GenerationSpec::init(ReservedSpace rs, int level,
-                                 GenRemSet* remset) {
+Generation* GenerationSpec::init(ReservedSpace rs, GenRemSet* remset) {
   switch (name()) {
     case Generation::DefNew:
-      return new DefNewGeneration(rs, init_size(), level);
+      return new DefNewGeneration(rs, init_size());
 
     case Generation::MarkSweepCompact:
-      return new TenuredGeneration(rs, init_size(), level, remset);
+      return new TenuredGeneration(rs, init_size(), remset);
 
 #if INCLUDE_ALL_GCS
     case Generation::ParNew:
-      return new ParNewGeneration(rs, init_size(), level);
+      return new ParNewGeneration(rs, init_size());
 
     case Generation::ConcurrentMarkSweep: {
       assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
@@ -61,7 +60,7 @@
       ConcurrentMarkSweepGeneration* g = NULL;
       g = new ConcurrentMarkSweepGeneration(rs,
-                 init_size(), level, ctrs, UseCMSAdaptiveFreeLists,
+                 init_size(), ctrs, UseCMSAdaptiveFreeLists,
                  (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);
 
       g->initialize_performance_counters();
--- old/src/share/vm/memory/generationSpec.hpp 2014-10-17 16:28:47.000000000 +0200
+++ new/src/share/vm/memory/generationSpec.hpp 2014-10-17 16:28:47.000000000 +0200
@@ -39,13 +39,13 @@
   size_t _max_size;
 
 public:
-  GenerationSpec(Generation::Name name, size_t init_size, size_t max_size) {
+  GenerationSpec(Generation::Name name, size_t init_size, size_t max_size, size_t alignment) {
     _name = name;
-    _init_size = init_size;
-    _max_size = max_size;
+    _init_size = align_size_up(init_size, alignment);
+    _max_size = align_size_up(max_size, alignment);
   }
 
-  Generation* init(ReservedSpace rs, int level, GenRemSet* remset);
+  Generation* init(ReservedSpace rs, GenRemSet* remset);
 
   // Accessors
   Generation::Name name() const { return _name; }
@@ -53,16 +53,6 @@
   void set_init_size(size_t size) { _init_size = size; }
   size_t max_size() const { return _max_size; }
   void set_max_size(size_t size) { _max_size = size; }
-
-  // Alignment
-  void align(size_t alignment) {
-    set_init_size(align_size_up(init_size(), alignment));
-    set_max_size(align_size_up(max_size(), alignment));
-  }
-
-  // Return the number of regions contained in the generation which
-  // might need to be independently covered by a remembered set.
-  virtual int n_covered_regions() const { return 1; }
 };
 
 typedef GenerationSpec* GenerationSpecPtr;
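Since alignment is now applied once in the GenerationSpec constructor, a collector policy builds pre-aligned specs in a single step instead of calling the removed align() afterwards. A sketch under the assumption that the policy already knows its sizes and generation alignment (the variable names are illustrative, not taken from this patch):

    // Old pattern: spec = new GenerationSpec(name, init, max); spec->align(a);
    // New pattern: the constructor rounds both sizes up itself.
    GenerationSpec* young_spec = new GenerationSpec(Generation::DefNew,
                                                    young_init_size,
                                                    young_max_size,
                                                    gen_alignment);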
--- old/src/share/vm/memory/space.cpp 2014-10-17 16:28:48.000000000 +0200
+++ new/src/share/vm/memory/space.cpp 2014-10-17 16:28:48.000000000 +0200
@@ -389,7 +389,7 @@
     cp->space->set_compaction_top(compact_top);
     cp->space = cp->space->next_compaction_space();
     if (cp->space == NULL) {
-      cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
+      cp->gen = GenCollectedHeap::heap()->young_gen();
       assert(cp->gen != NULL, "compaction must succeed");
       cp->space = cp->gen->first_compaction_space();
       assert(cp->space != NULL, "generation must have a first compaction space");
--- old/src/share/vm/memory/tenuredGeneration.cpp 2014-10-17 16:28:49.000000000 +0200
+++ new/src/share/vm/memory/tenuredGeneration.cpp 2014-10-17 16:28:49.000000000 +0200
@@ -36,10 +36,9 @@
 #include "utilities/macros.hpp"
 
 TenuredGeneration::TenuredGeneration(ReservedSpace rs,
-                                     size_t initial_byte_size, int level,
+                                     size_t initial_byte_size,
                                      GenRemSet* remset) :
-  OneContigSpaceCardGeneration(rs, initial_byte_size,
-                               level, remset, NULL)
+  OneContigSpaceCardGeneration(rs, initial_byte_size, remset, NULL)
 {
   HeapWord* bottom = (HeapWord*) _virtual_space.low();
   HeapWord* end    = (HeapWord*) _virtual_space.high();
@@ -171,11 +170,13 @@
          err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
                  " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
 }
-void TenuredGeneration::update_gc_stats(int current_level,
+
+void TenuredGeneration::update_gc_stats(Generation* current_generation,
                                         bool full) {
-  // If the next lower level(s) has been collected, gather any statistics
+  // If the young generation has been collected, gather any statistics
   // that are of interest at this point.
-  if (!full && (current_level + 1) == level()) {
+  bool current_is_young = (current_generation == GenCollectedHeap::heap()->young_gen());
+  if (!full && current_is_young) {
     // Calculate size of data promoted from the younger generations
     // before doing the collection.
     size_t used_before_gc = used();
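The space.cpp and tenuredGeneration.cpp changes above both bake in the two-generation shape: when the old generation runs out of compaction spaces, the only possible fallback target is the young generation, and promotion statistics are gathered exactly when the young generation was the one collected. A hypothetical assertion (not part of this patch) that would make the compaction assumption explicit at the point of the fallback:

    // Hypothetical sanity check: only the old generation can exhaust its
    // compaction spaces and fall back to another generation.
    assert(cp->gen == GenCollectedHeap::heap()->old_gen(),
           "two-generation heap: fallback target must be the young gen");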
--- old/src/share/vm/memory/tenuredGeneration.hpp 2014-10-17 16:28:49.000000000 +0200
+++ new/src/share/vm/memory/tenuredGeneration.hpp 2014-10-17 16:28:49.000000000 +0200
@@ -53,7 +53,7 @@
   CSpaceCounters* _space_counters;
 
  public:
-  TenuredGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
+  TenuredGeneration(ReservedSpace rs, size_t initial_byte_size,
                     GenRemSet* remset);
 
   Generation::Name kind() { return Generation::MarkSweepCompact; }
@@ -98,7 +98,7 @@
 
   // Statistics
 
-  virtual void update_gc_stats(int level, bool full);
+  virtual void update_gc_stats(Generation* current_generation, bool full);
 
   virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const;
--- old/src/share/vm/runtime/vmStructs.cpp 2014-10-17 16:28:50.000000000 +0200
+++ new/src/share/vm/runtime/vmStructs.cpp 2014-10-17 16:28:50.000000000 +0200
@@ -515,16 +515,15 @@
   nonstatic_field(CollectedHeap,          _defer_initial_card_mark, bool)                     \
   nonstatic_field(CollectedHeap,          _is_gc_active,            bool)                     \
   nonstatic_field(CollectedHeap,          _total_collections,       unsigned int)             \
+                                                                                              \
   nonstatic_field(CompactibleSpace,       _compaction_top,          HeapWord*)                \
   nonstatic_field(CompactibleSpace,       _first_dead,              HeapWord*)                \
   nonstatic_field(CompactibleSpace,       _end_of_live,             HeapWord*)                \
                                                                                               \
-                                                                                              \
   nonstatic_field(ContiguousSpace,        _top,                     HeapWord*)                \
   nonstatic_field(ContiguousSpace,        _concurrent_iteration_safe_limit, HeapWord*)        \
   nonstatic_field(ContiguousSpace,        _saved_mark_word,         HeapWord*)                \
                                                                                               \
-  nonstatic_field(DefNewGeneration,       _next_gen,                Generation*)              \
   nonstatic_field(DefNewGeneration,       _tenuring_threshold,      uint)                     \
   nonstatic_field(DefNewGeneration,       _age_table,               ageTable)                 \
   nonstatic_field(DefNewGeneration,       _eden_space,              EdenSpace*)               \
@@ -535,7 +534,6 @@
                                                                                               \
   nonstatic_field(Generation,             _reserved,                MemRegion)                \
   nonstatic_field(Generation,             _virtual_space,           VirtualSpace)             \
-  nonstatic_field(Generation,             _level,                   int)                      \
   nonstatic_field(Generation,             _stat_record,             Generation::StatRecord)   \
                                                                                               \
   nonstatic_field(Generation::StatRecord, invocations,              int)                      \
@@ -545,10 +543,10 @@
   nonstatic_field(GenerationSpec,         _init_size,               size_t)                   \
   nonstatic_field(GenerationSpec,         _max_size,                size_t)                   \
                                                                                               \
-  static_field(GenCollectedHeap,          _gch,                     GenCollectedHeap*)        \
-  nonstatic_field(GenCollectedHeap,       _n_gens,                  int)                      \
-  unchecked_nonstatic_field(GenCollectedHeap, _gens, sizeof(GenCollectedHeap::_gens)) /* NOTE: no type */ \
-  nonstatic_field(GenCollectedHeap,       _gen_specs,               GenerationSpec**)         \
+  static_field(GenCollectedHeap,          _gch,                     GenCollectedHeap*)        \
+                                                                                              \
+  nonstatic_field(GenCollectorPolicy,     _young_gen_spec,          GenerationSpec*)          \
+  nonstatic_field(GenCollectorPolicy,     _old_gen_spec,            GenerationSpec*)          \
                                                                                               \
   nonstatic_field(HeapWord,               i,                        char*)                    \
                                                                                               \
@@ -1486,6 +1484,7 @@
   declare_type(CardGeneration, Generation)                   \
   declare_type(OneContigSpaceCardGeneration, CardGeneration) \
   declare_type(TenuredGeneration, OneContigSpaceCardGeneration) \
+  declare_toplevel_type(GenCollectorPolicy)                  \
   declare_toplevel_type(Space)                               \
   declare_toplevel_type(BitMap)                              \
   declare_type(CompactibleSpace, Space)                      \
@@ -2237,8 +2236,6 @@
   declare_constant(CollectedHeap::SharedHeap)                \
   declare_constant(CollectedHeap::GenCollectedHeap)          \
                                                              \
-  declare_constant(GenCollectedHeap::max_gens)               \
-                                                             \
   /* constants from Generation::Name enum */                 \
                                                              \
   declare_constant(Generation::DefNew)                       \
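With _n_gens, _gens and _gen_specs no longer exported, out-of-process tools reach the two generations and their specs through the fields added above. The in-VM equivalents, assuming the accessors shown earlier in this patch, are:

    // In-VM view of the data the serviceability agent now mirrors:
    GenCollectedHeap* gch = GenCollectedHeap::heap();          // static _gch
    Generation* young = gch->young_gen();                      // _young_gen
    Generation* old_g = gch->old_gen();                        // _old_gen
    GenerationSpec* ys = gch->gen_policy()->young_gen_spec();  // GenCollectorPolicy::_young_gen_spec
    GenerationSpec* os = gch->gen_policy()->old_gen_spec();    // GenCollectorPolicy::_old_gen_spec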
--- old/src/share/vm/services/memoryService.cpp 2014-10-17 16:28:51.000000000 +0200
+++ new/src/share/vm/services/memoryService.cpp 2014-10-17 16:28:51.000000000 +0200
@@ -126,12 +126,10 @@
   CollectorPolicy* policy = heap->collector_policy();
 
   assert(policy->is_generation_policy(), "Only support two generations");
-  guarantee(heap->n_gens() == 2, "Only support two-generation heap");
-
   GenCollectorPolicy* gen_policy = policy->as_generation_policy();
   if (gen_policy != NULL) {
-    GenerationSpec** specs = gen_policy->generations();
-    Generation::Name kind = specs[0]->name();
+    guarantee(gen_policy->number_of_generations() == 2, "Only support two-generation heap");
+    Generation::Name kind = gen_policy->young_gen_spec()->name();
     switch (kind) {
       case Generation::DefNew:
         _minor_gc_manager = MemoryManager::get_copy_memory_manager();
@@ -160,8 +158,8 @@
   _managers_list->append(_minor_gc_manager);
   _managers_list->append(_major_gc_manager);
 
-  add_generation_memory_pool(heap->get_gen(minor), _major_gc_manager, _minor_gc_manager);
-  add_generation_memory_pool(heap->get_gen(major), _major_gc_manager);
+  add_generation_memory_pool(heap->young_gen(), _major_gc_manager, _minor_gc_manager);
+  add_generation_memory_pool(heap->old_gen(), _major_gc_manager);
 }
 
 #if INCLUDE_ALL_GCS
--- old/src/share/vm/services/memoryService.hpp 2014-10-17 16:28:52.000000000 +0200
+++ new/src/share/vm/services/memoryService.hpp 2014-10-17 16:28:52.000000000 +0200
@@ -57,13 +57,6 @@
     init_code_heap_pools_size = 9
   };
 
-  // index for minor and major generations
-  enum {
-    minor = 0,
-    major = 1,
-    n_gens = 2
-  };
-
   static GrowableArray<MemoryPool*>* _pools_list;
   static GrowableArray<MemoryManager*>* _managers_list;
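Taken together, clients that previously indexed generations through the removed minor/major enum now ask the heap directly. A condensed sketch of the replacement idiom, using only names from the hunks above:

    // Was: heap->get_gen(minor) / heap->get_gen(major), with minor = 0, major = 1.
    Generation* minor_gen = GenCollectedHeap::heap()->young_gen();
    Generation* major_gen = GenCollectedHeap::heap()->old_gen();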