src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
*** old/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Oct 17 16:09:56 2014
--- new/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Oct 17 16:09:56 2014

*** 61,89 **** --- 61,89 ----
  #ifdef _MSC_VER
  #pragma warning( push )
  #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
  #endif
  ParScanThreadState::ParScanThreadState(Space* to_space_,
!                                        ParNewGeneration* young_gen_,
                                         Generation* old_gen_,
                                         int thread_num_,
                                         ObjToScanQueueSet* work_queue_set_,
                                         Stack<oop, mtGC>* overflow_stacks_,
                                         size_t desired_plab_sz_,
                                         ParallelTaskTerminator& term_) :
!   _to_space(to_space_), _old_gen(old_gen_), _young_gen(young_gen_), _thread_num(thread_num_),
    _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
    _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
    _ageTable(false), // false ==> not the global age table, no perf data.
    _to_space_alloc_buffer(desired_plab_sz_),
!   _to_space_closure(young_gen_, this), _old_gen_closure(young_gen_, this),
!   _to_space_root_closure(young_gen_, this), _old_gen_root_closure(young_gen_, this),
!   _older_gen_closure(young_gen_, this),
    _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
!                       &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
                        work_queue_set_, &term_),
!   _is_alive_closure(young_gen_), _scan_weak_ref_closure(young_gen_, this),
    _keep_alive_closure(&_scan_weak_ref_closure),
    _strong_roots_time(0.0), _term_time(0.0)
  {
  #if TASKQUEUE_STATS
    _term_attempts = 0;
*** 491,501 **** --- 491,500 ----
  ParScanClosure::ParScanClosure(ParNewGeneration* g,
                                 ParScanThreadState* par_scan_state) :
    OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
  {
-   assert(_g->level() == 0, "Optimized for youngest generation");
    _boundary = _g->reserved().end();
  }
  void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
  void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
*** 576,597 **** --- 575,596 ----
           "Broken overflow list?");
    // Finish the last termination pause.
    par_scan_state()->end_term_time();
  }
! ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, Generation* old_gen,
                               HeapWord* young_old_boundary,
                               ParScanThreadStateSet* state_set) :
      AbstractGangTask("ParNewGeneration collection"),
!     _young_gen(young_gen), _old_gen(old_gen),
      _young_old_boundary(young_old_boundary),
      _state_set(state_set)
  {}
  // Reset the terminator for the given number of
  // active threads.
  void ParNewGenTask::set_for_termination(int active_workers) {
!   _state_set->reset(active_workers, _young_gen->promotion_failed());
    // Should the heap be passed in?  There's only 1 for now so
    // grab it instead.
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    gch->set_n_termination(active_workers);
  }
*** 600,611 **** --- 599,608 ----
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    // Since this is being done in a separate thread, need new resource
    // and handle marks.
    ResourceMark rm;
    HandleMark hm;
    // We would need multiple old-gen queues otherwise.
    assert(gch->n_gens() == 2,
           "Par young collection currently only works with one older gen.");
    Generation* old_gen = gch->old_gen();
    ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
    assert(_state_set->is_valid(worker_id), "Should not have been called");
*** 617,627 **** --- 614,624 ----
    CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                             &par_scan_state.to_space_root_closure(),
                                             false);
    par_scan_state.start_strong_roots();
! gch->gen_process_roots(_gen->level(),
! gch->gen_process_roots(Generation::Young,
                           true,  // Process younger gens, if any,
                                  // as strong roots.
                           false, // no scope; this is parallel code
                           SharedHeap::SO_ScavengeCodeCache,
                           GenCollectedHeap::StrongAndWeakRoots,
*** 638,649 **** --- 635,646 ----
  #ifdef _MSC_VER
  #pragma warning( push )
  #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
  #endif
  ParNewGeneration::
- ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
-   : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
    _overflow_list(NULL),
    _is_alive_closure(this),
    _plab_stats(YoungPLABSize, PLABWeight)
  {
    NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
*** 776,799 **** --- 773,796 ----
    virtual void work(uint worker_id);
    virtual void set_for_termination(int active_workers) {
      _state_set.terminator()->reset_for_reuse(active_workers);
    }
  private:
!   ParNewGeneration&      _young_gen;
    ProcessTask&           _task;
    Generation&            _old_gen;
    HeapWord*              _young_old_boundary;
    ParScanThreadStateSet& _state_set;
  };
  ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
!     ProcessTask& task, ParNewGeneration& young_gen, Generation& old_gen,
      HeapWord* young_old_boundary, ParScanThreadStateSet& state_set) :
      AbstractGangTask("ParNewGeneration parallel reference processing"),
!     _young_gen(young_gen),
      _task(task),
      _old_gen(old_gen),
      _young_old_boundary(young_old_boundary),
      _state_set(state_set)
  {
*** 833,843 **** --- 830,840 ----
    assert(gch->kind() == CollectedHeap::GenCollectedHeap,
           "not a generational heap");
    FlexibleWorkGang* workers = gch->workers();
    assert(workers != NULL, "Need parallel worker threads.");
    _state_set.reset(workers->active_workers(), _generation.promotion_failed());
! ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
! ParNewRefProcTaskProxy rp_task(task, _generation, *(gch->old_gen()),
                                   _generation.reserved().end(), _state_set);
    workers->run_task(&rp_task);
    _state_set.reset(0 /* bad value in debug if not reset */,
                     _generation.promotion_failed());
  }
*** 862,886 **** --- 859,883 ----
  ScanClosureWithParBarrier::
  ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
    ScanClosure(g, gc_barrier)
  {}
  EvacuateFollowersClosureGeneral::
- EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                  OopsInGenClosure* cur, OopsInGenClosure* older) :
-   _gch(gch), _level(level),
    _scan_cur_or_nonheap(cur), _scan_older(older)
  {}
  void EvacuateFollowersClosureGeneral::do_void() {
    do {
      // Beware: this call will lead to closure applications via virtual
      // calls.
!     _gch->oop_since_save_marks_iterate(_level,
!     _gch->oop_since_save_marks_iterate(Generation::Young,
                                         _scan_cur_or_nonheap,
                                         _scan_older);
!   } while (!_gch->no_allocs_since_save_marks(_level));
!   } while (!_gch->no_allocs_since_save_marks(true /* include_young */));
  }
  // A Generation that does parallel young-gen collection.
*** 929,940 **** --- 926,935 ----
    int active_workers =
      AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                              workers->active_workers(),
                                              Threads::number_of_non_daemon_threads());
    workers->set_active_workers(active_workers);
    assert(gch->n_gens() == 2,
           "Par collection currently only works with single older gen.");
    _old_gen = gch->old_gen();
    // Do we have to avoid promotion_undo?
    if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
      set_avoid_promotion_undo(true);
    }
*** 1005,1015 **** --- 1000,1010 ----
    ScanWeakRefClosure scan_weak_ref(this);
    KeepAliveClosure keep_alive(&scan_weak_ref);
    ScanClosure scan_without_gc_barrier(this, false);
    ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
    set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
- EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
                                                       &scan_without_gc_barrier, &scan_with_gc_barrier);
    rp->setup_policy(clear_all_soft_refs);
    // Can the mt_degree be set later (at run_task() time would be best)?
    rp->set_active_mt_degree(active_workers);
    ReferenceProcessorStats stats;
