src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File hotspot Cdiff src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

Print this page
rev 7212 : [mq]: remove_get_gen

*** 324,334 **** #endif // TASKQUEUE_STATS private: ParallelTaskTerminator& _term; ParNewGeneration& _gen; ! Generation& _next_gen; public: bool is_valid(int id) const { return id < length(); } ParallelTaskTerminator* terminator() { return &_term; } }; --- 324,334 ---- #endif // TASKQUEUE_STATS private: ParallelTaskTerminator& _term; ParNewGeneration& _gen; ! Generation& _old_gen; public: bool is_valid(int id) const { return id < length(); } ParallelTaskTerminator* terminator() { return &_term; } };
*** 337,347 **** int num_threads, Space& to_space, ParNewGeneration& gen, Generation& old_gen, ObjToScanQueueSet& queue_set, Stack<oop, mtGC>* overflow_stacks, size_t desired_plab_sz, ParallelTaskTerminator& term) : ResourceArray(sizeof(ParScanThreadState), num_threads), ! _gen(gen), _next_gen(old_gen), _term(term) { assert(num_threads > 0, "sanity check!"); assert(ParGCUseLocalOverflow == (overflow_stacks != NULL), "overflow_stack allocation mismatch"); // Initialize states. --- 337,347 ---- int num_threads, Space& to_space, ParNewGeneration& gen, Generation& old_gen, ObjToScanQueueSet& queue_set, Stack<oop, mtGC>* overflow_stacks, size_t desired_plab_sz, ParallelTaskTerminator& term) : ResourceArray(sizeof(ParScanThreadState), num_threads), ! _gen(gen), _old_gen(old_gen), _term(term) { assert(num_threads > 0, "sanity check!"); assert(ParGCUseLocalOverflow == (overflow_stacks != NULL), "overflow_stack allocation mismatch"); // Initialize states.
*** 473,484 **** // them all into one. ageTable *local_table = par_scan_state.age_table(); _gen.age_table()->merge(local_table); // Inform old gen that we're done. ! _next_gen.par_promote_alloc_done(i); ! _next_gen.par_oop_since_save_marks_iterate_done(i); } if (UseConcMarkSweepGC && ParallelGCThreads > 0) { // We need to call this even when ResizeOldPLAB is disabled // so as to avoid breaking some asserts. While we may be able --- 473,484 ---- // them all into one. ageTable *local_table = par_scan_state.age_table(); _gen.age_table()->merge(local_table); // Inform old gen that we're done. ! _old_gen.par_promote_alloc_done(i); ! _old_gen.par_oop_since_save_marks_iterate_done(i); } if (UseConcMarkSweepGC && ParallelGCThreads > 0) { // We need to call this even when ResizeOldPLAB is disabled // so as to avoid breaking some asserts. While we may be able
*** 576,589 **** "Broken overflow list?"); // Finish the last termination pause. par_scan_state()->end_term_time(); } ! ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen, HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) : AbstractGangTask("ParNewGeneration collection"), ! _gen(gen), _next_gen(next_gen), _young_old_boundary(young_old_boundary), _state_set(state_set) {} // Reset the terminator for the given number of --- 576,589 ---- "Broken overflow list?"); // Finish the last termination pause. par_scan_state()->end_term_time(); } ! ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* old_gen, HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) : AbstractGangTask("ParNewGeneration collection"), ! _gen(gen), _old_gen(old_gen), _young_old_boundary(young_old_boundary), _state_set(state_set) {} // Reset the terminator for the given number of
*** 603,613 **** ResourceMark rm; HandleMark hm; // We would need multiple old-gen queues otherwise. assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen."); ! Generation* old_gen = gch->next_gen(_gen); ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id); assert(_state_set->is_valid(worker_id), "Should not have been called"); par_scan_state.set_young_old_boundary(_young_old_boundary); --- 603,613 ---- ResourceMark rm; HandleMark hm; // We would need multiple old-gen queues otherwise. assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen."); ! Generation* old_gen = gch->old_gen(); ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id); assert(_state_set->is_valid(worker_id), "Should not have been called"); par_scan_state.set_young_old_boundary(_young_old_boundary);
*** 766,776 **** class ParNewRefProcTaskProxy: public AbstractGangTask { typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; public: ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen, ! Generation& next_gen, HeapWord* young_old_boundary, ParScanThreadStateSet& state_set); private: virtual void work(uint worker_id); --- 766,776 ---- class ParNewRefProcTaskProxy: public AbstractGangTask { typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; public: ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen, ! Generation& old_gen, HeapWord* young_old_boundary, ParScanThreadStateSet& state_set); private: virtual void work(uint worker_id);
*** 778,801 **** _state_set.terminator()->reset_for_reuse(active_workers); } private: ParNewGeneration& _gen; ProcessTask& _task; ! Generation& _next_gen; HeapWord* _young_old_boundary; ParScanThreadStateSet& _state_set; }; ParNewRefProcTaskProxy::ParNewRefProcTaskProxy( ProcessTask& task, ParNewGeneration& gen, ! Generation& next_gen, HeapWord* young_old_boundary, ParScanThreadStateSet& state_set) : AbstractGangTask("ParNewGeneration parallel reference processing"), _gen(gen), _task(task), ! _next_gen(next_gen), _young_old_boundary(young_old_boundary), _state_set(state_set) { } --- 778,801 ---- _state_set.terminator()->reset_for_reuse(active_workers); } private: ParNewGeneration& _gen; ProcessTask& _task; ! Generation& _old_gen; HeapWord* _young_old_boundary; ParScanThreadStateSet& _state_set; }; ParNewRefProcTaskProxy::ParNewRefProcTaskProxy( ProcessTask& task, ParNewGeneration& gen, ! Generation& old_gen, HeapWord* young_old_boundary, ParScanThreadStateSet& state_set) : AbstractGangTask("ParNewGeneration parallel reference processing"), _gen(gen), _task(task), ! _old_gen(old_gen), _young_old_boundary(young_old_boundary), _state_set(state_set) { }
*** 897,907 **** // All the spaces are in play for mark-sweep. swap_spaces(); // Make life simpler for CMS || rescan; see 6483690. from()->set_next_compaction_space(to()); gch->set_incremental_collection_failed(); // Inform the next generation that a promotion failure occurred. ! _next_gen->promotion_failure_occurred(); // Trace promotion failure in the parallel GC threads thread_state_set.trace_promotion_failed(gc_tracer); // Single threaded code may have reported promotion failure to the global state if (_promotion_failed_info.has_failed()) { --- 897,907 ---- // All the spaces are in play for mark-sweep. swap_spaces(); // Make life simpler for CMS || rescan; see 6483690. from()->set_next_compaction_space(to()); gch->set_incremental_collection_failed(); // Inform the old generation that a promotion failure occurred. ! _old_gen->promotion_failure_occurred(); // Trace promotion failure in the parallel GC threads thread_state_set.trace_promotion_failed(gc_tracer); // Single threaded code may have reported promotion failure to the global state if (_promotion_failed_info.has_failed()) {
*** 931,941 **** workers->active_workers(), Threads::number_of_non_daemon_threads()); workers->set_active_workers(active_workers); assert(gch->n_gens() == 2, "Par collection currently only works with single older gen."); ! _next_gen = gch->next_gen(this); // Do we have to avoid promotion_undo? if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) { set_avoid_promotion_undo(true); } --- 931,941 ---- workers->active_workers(), Threads::number_of_non_daemon_threads()); workers->set_active_workers(active_workers); assert(gch->n_gens() == 2, "Par collection currently only works with single older gen."); ! _old_gen = gch->old_gen(); // Do we have to avoid promotion_undo? if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) { set_avoid_promotion_undo(true); }
*** 977,990 **** // Always set the terminator for the active number of workers // because only those workers go through the termination protocol. ParallelTaskTerminator _term(n_workers, task_queues()); ParScanThreadStateSet thread_state_set(workers->active_workers(), ! *to(), *this, *_next_gen, *task_queues(), _overflow_stacks, desired_plab_sz(), _term); ! ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set); gch->set_par_threads(n_workers); gch->rem_set()->prepare_for_younger_refs_iterate(true); // It turns out that even when we're using 1 thread, doing the work in a // separate thread causes wide variance in run times. We can't help this // in the multi-threaded case, but we special-case n=1 here to get --- 977,990 ---- // Always set the terminator for the active number of workers // because only those workers go through the termination protocol. ParallelTaskTerminator _term(n_workers, task_queues()); ParScanThreadStateSet thread_state_set(workers->active_workers(), ! *to(), *this, *_old_gen, *task_queues(), _overflow_stacks, desired_plab_sz(), _term); ! ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set); gch->set_par_threads(n_workers); gch->rem_set()->prepare_for_younger_refs_iterate(true); // It turns out that even when we're using 1 thread, doing the work in a // separate thread causes wide variance in run times. We can't help this // in the multi-threaded case, but we special-case n=1 here to get
*** 1194,1204 **** if (forward_ptr != NULL) { // someone else beat us to it. return real_forwardee(old); } ! new_obj = _next_gen->par_promote(par_scan_state->thread_num(), old, m, sz); if (new_obj == NULL) { // promotion failed, forward to self _promotion_failed = true; --- 1194,1204 ---- if (forward_ptr != NULL) { // someone else beat us to it. return real_forwardee(old); } ! new_obj = _old_gen->par_promote(par_scan_state->thread_num(), old, m, sz); if (new_obj == NULL) { // promotion failed, forward to self _promotion_failed = true;
*** 1312,1322 **** } if (new_obj == NULL) { // Either to-space is full or we decided to promote // try allocating obj tenured ! new_obj = _next_gen->par_promote(par_scan_state->thread_num(), old, m, sz); if (new_obj == NULL) { // promotion failed, forward to self forward_ptr = old->forward_to_atomic(old); --- 1312,1322 ---- } if (new_obj == NULL) { // Either to-space is full or we decided to promote // try allocating obj tenured ! new_obj = _old_gen->par_promote(par_scan_state->thread_num(), old, m, sz); if (new_obj == NULL) { // promotion failed, forward to self forward_ptr = old->forward_to_atomic(old);
*** 1393,1403 **** // Must be in to_space. assert(to()->is_in_reserved(new_obj), "Checking"); par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz); } else { assert(!_avoid_promotion_undo, "Should not be here if avoiding."); ! _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(), (HeapWord*)new_obj, sz); } return forward_ptr; } --- 1393,1403 ---- // Must be in to_space. assert(to()->is_in_reserved(new_obj), "Checking"); par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz); } else { assert(!_avoid_promotion_undo, "Should not be here if avoiding."); ! _old_gen->par_promote_alloc_undo(par_scan_state->thread_num(), (HeapWord*)new_obj, sz); } return forward_ptr; }
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File