src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
--- old/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Oct 17 16:28:35 2014
+++ new/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Oct 17 16:28:35 2014

@@ -61,40 +61,51 @@
 #ifdef _MSC_VER
 #pragma warning( push )
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif
 ParScanThreadState::ParScanThreadState(Space* to_space_,
-                                       ParNewGeneration* gen_,
+                                       ParNewGeneration* young_gen_,
                                        Generation* old_gen_,
                                        int thread_num_,
                                        ObjToScanQueueSet* work_queue_set_,
                                        Stack<oop, mtGC>* overflow_stacks_,
                                        size_t desired_plab_sz_,
-                                       ParallelTaskTerminator& term_) :
-  _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
-  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
-  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
-  _ageTable(false), // false ==> not the global age table, no perf data.
-  _to_space_alloc_buffer(desired_plab_sz_),
-  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
-  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
-  _older_gen_closure(gen_, this),
-  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
-                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
-                      work_queue_set_, &term_),
-  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
-  _keep_alive_closure(&_scan_weak_ref_closure),
-  _strong_roots_time(0.0), _term_time(0.0)
-{
+                                       ParallelTaskTerminator& term_)
+  : _to_space(to_space_),
+    _old_gen(old_gen_),
+    _young_gen(young_gen_),
+    _thread_num(thread_num_),
+    _work_queue(work_queue_set_->queue(thread_num_)),
+    _to_space_full(false),
+    _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
+    _ageTable(false), // false ==> not the global age table, no perf data.
+    _to_space_alloc_buffer(desired_plab_sz_),
+    _to_space_closure(young_gen_, this),
+    _old_gen_closure(young_gen_, this),
+    _to_space_root_closure(young_gen_, this),
+    _old_gen_root_closure(young_gen_, this),
+    _older_gen_closure(young_gen_, this),
+    _evacuate_followers(this,
+                        &_to_space_closure,
+                        &_old_gen_closure,
+                        &_to_space_root_closure,
+                        young_gen_,
+                        &_old_gen_root_closure,
+                        work_queue_set_,
+                        &term_),
+    _is_alive_closure(young_gen_),
+    _scan_weak_ref_closure(young_gen_, this),
+    _keep_alive_closure(&_scan_weak_ref_closure),
+    _strong_roots_time(0.0),
+    _term_time(0.0) {
 #if TASKQUEUE_STATS
   _term_attempts = 0;
   _overflow_refills = 0;
   _overflow_refill_objs = 0;
 #endif // TASKQUEUE_STATS
-  _survivor_chunk_array =
-    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
+  _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
   _hash_seed = 17;  // Might want to take time-based random value.
   _start = os::elapsedTime();
   _old_gen_closure.set_generation(old_gen_);
   _old_gen_root_closure.set_generation(old_gen_);
 }
@@ -153,11 +164,10 @@
     // object is in old generation
     obj->oop_iterate_range(&_old_gen_closure, start, end);
   }
 }
-
 void ParScanThreadState::trim_queues(int max_size) {
   ObjToScanQueue* queue = work_queue();
   do {
     while (queue->size() > (juint)max_size) {
       oop obj_to_scan;
@@ -221,19 +231,16 @@
   overflow_stack()->push(p);
   assert(young_gen()->overflow_list() == NULL, "Error");
 }
 
 HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
-
-  // Otherwise, if the object is small enough, try to reallocate the
-  // buffer.
+  // If the object is small enough, try to reallocate the buffer.
   HeapWord* obj = NULL;
   if (!_to_space_full) {
     ParGCAllocBuffer* const plab = to_space_alloc_buffer();
     Space* const sp = to_space();
-    if (word_sz * 100 <
-        ParallelGCBufferWastePct * plab->word_sz()) {
+    if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) {
       // Is small enough; abandon this buffer and start a new one.
       plab->retire(false, false);
       size_t buf_size = plab->word_sz();
       HeapWord* buf_space = sp->par_allocate(buf_size);
       if (buf_space == NULL) {
@@ -271,13 +278,11 @@
     }
   }
   return obj;
 }
 
-void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
-                                                size_t word_sz) {
+void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
   // Is the alloc in the current alloc buffer?
   if (to_space_alloc_buffer()->contains(obj)) {
     assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
            "Should contain whole object.");
     to_space_alloc_buffer()->undo_allocation(obj, word_sz);
@@ -299,11 +304,11 @@
   ParScanThreadStateSet(int num_threads,
                         Space& to_space,
                         ParNewGeneration& gen,
                         Generation& old_gen,
                         ObjToScanQueueSet& queue_set,
-                        Stack<oop, mtGC>* overflow_stacks_,
+                        Stack<oop, mtGC>* overflow_stacks,
                         size_t desired_plab_sz,
                         ParallelTaskTerminator& term);
 
   ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }
@@ -324,25 +329,28 @@
 #endif // TASKQUEUE_STATS
 
 private:
   ParallelTaskTerminator& _term;
   ParNewGeneration&       _gen;
-  Generation&             _next_gen;
+  Generation&             _old_gen;
 public:
   bool is_valid(int id) const { return id < length(); }
   ParallelTaskTerminator* terminator() { return &_term; }
 };
 
-ParScanThreadStateSet::ParScanThreadStateSet(
-  int num_threads, Space& to_space, ParNewGeneration& gen,
-  Generation& old_gen, ObjToScanQueueSet& queue_set,
-  Stack<oop, mtGC>* overflow_stacks,
-  size_t desired_plab_sz, ParallelTaskTerminator& term)
+ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
+                                             Space& to_space,
+                                             ParNewGeneration& gen,
+                                             Generation& old_gen,
+                                             ObjToScanQueueSet& queue_set,
+                                             Stack<oop, mtGC>* overflow_stacks,
+                                             size_t desired_plab_sz,
+                                             ParallelTaskTerminator& term)
   : ResourceArray(sizeof(ParScanThreadState), num_threads),
-    _gen(gen), _next_gen(old_gen), _term(term) {
+    _gen(gen),
+    _old_gen(old_gen),
+    _term(term) {
   assert(num_threads > 0, "sanity check!");
   assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
          "overflow_stack allocation mismatch");
   // Initialize states.
   for (int i = 0; i < num_threads; ++i) {
@@ -350,12 +358,11 @@
       ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                          overflow_stacks, desired_plab_sz, term);
   }
 }
 
-inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
-{
+inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
   assert(i >= 0 && i < length(), "sanity check!");
   return ((ParScanThreadState*)_data)[i];
 }
 
 void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
@@ -365,51 +372,45 @@
       thread_state(i).promotion_failed_info().reset();
     }
   }
 }
 
-void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
-{
+void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed) {
   _term.reset_for_reuse(active_threads);
   if (promotion_failed) {
     for (int i = 0; i < length(); ++i) {
       thread_state(i).print_promotion_failure_size();
     }
   }
 }
 
 #if TASKQUEUE_STATS
-void
-ParScanThreadState::reset_stats()
-{
+void ParScanThreadState::reset_stats() {
   taskqueue_stats().reset();
   _term_attempts = 0;
   _overflow_refills = 0;
   _overflow_refill_objs = 0;
 }
 
-void ParScanThreadStateSet::reset_stats()
-{
+void ParScanThreadStateSet::reset_stats() {
   for (int i = 0; i < length(); ++i) {
     thread_state(i).reset_stats();
   }
 }
 
-void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
-{
+void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
   st->print_raw_cr("GC Termination Stats");
   st->print_raw_cr("     elapsed  --strong roots-- "
                    "-------termination-------");
   st->print_raw_cr("thr     ms        ms       %   "
                    "    ms       %   attempts");
   st->print_raw_cr("--- --------- --------- ------ "
                    "--------- ------ --------");
 }
 
-void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
-{
+void ParScanThreadStateSet::print_termination_stats(outputStream* const st) {
   print_termination_stats_hdr(st);
 
   for (int i = 0; i < length(); ++i) {
     const ParScanThreadState & pss = thread_state(i);
     const double elapsed_ms = pss.elapsed_time() * 1000.0;
@@ -421,19 +422,17 @@
               term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
   }
 }
 
 // Print stats related to work queue activity.
-void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
-{
+void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
   st->print_raw_cr("GC Task Stats");
   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
 }
 
-void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
-{
+void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) {
   print_taskqueue_stats_hdr(st);
 
   TaskQueueStats totals;
   for (int i = 0; i < length(); ++i) {
     const ParScanThreadState & pss = thread_state(i);
@@ -451,12 +450,11 @@
   DEBUG_ONLY(totals.verify());
 }
 #endif // TASKQUEUE_STATS
 
-void ParScanThreadStateSet::flush()
-{
+void ParScanThreadStateSet::flush() {
   // Work in this loop should be kept as lightweight as
   // possible since this might otherwise become a bottleneck
   // to scaling. Should we add heavy-weight work into this
   // loop, consider parallelizing the loop into the worker threads.
   for (int i = 0; i < length(); ++i) {
@@ -473,12 +471,12 @@
     // them all into one.
     ageTable *local_table = par_scan_state.age_table();
     _gen.age_table()->merge(local_table);
 
     // Inform old gen that we're done.
-    _next_gen.par_promote_alloc_done(i);
-    _next_gen.par_oop_since_save_marks_iterate_done(i);
+    _old_gen.par_promote_alloc_done(i);
+    _old_gen.par_oop_since_save_marks_iterate_done(i);
   }
 
   if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
     // We need to call this even when ResizeOldPLAB is disabled
     // so as to avoid breaking some asserts. While we may be able
@@ -488,14 +486,14 @@
     CFLS_LAB::compute_desired_plab_size();
   }
 }
 
 ParScanClosure::ParScanClosure(ParNewGeneration* g,
-                               ParScanThreadState* par_scan_state) :
-  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
-{
+                               ParScanThreadState* par_scan_state)
+  : OopsInKlassOrGenClosure(g),
+    _par_scan_state(par_scan_state),
+    _g(g) {
   assert(_g->level() == 0, "Optimized for youngest generation");
   _boundary = _g->reserved().end();
 }
 
 void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
 void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
@@ -509,45 +507,43 @@
 void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
 void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
 
 ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                              ParScanThreadState* par_scan_state)
-  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state) {}
+  : ScanWeakRefClosure(g),
+    _par_scan_state(par_scan_state) {
+}
 
 void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
 void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }
 
 #ifdef WIN32
 #pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
 #endif
 
-ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
-    ParScanThreadState* par_scan_state_,
-    ParScanWithoutBarrierClosure* to_space_closure_,
-    ParScanWithBarrierClosure* old_gen_closure_,
-    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
-    ParNewGeneration* par_gen_,
-    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
-    ObjToScanQueueSet* task_queues_,
-    ParallelTaskTerminator* terminator_) :
-    _par_scan_state(par_scan_state_),
-    _to_space_closure(to_space_closure_),
-    _old_gen_closure(old_gen_closure_),
-    _to_space_root_closure(to_space_root_closure_),
-    _old_gen_root_closure(old_gen_root_closure_),
-    _par_gen(par_gen_),
-    _task_queues(task_queues_),
-    _terminator(terminator_) {}
+ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(ParScanThreadState* par_scan_state,
+                                                         ParScanWithoutBarrierClosure* to_space_closure,
+                                                         ParScanWithBarrierClosure* old_gen_closure,
+                                                         ParRootScanWithoutBarrierClosure* to_space_root_closure,
+                                                         ParNewGeneration* par_gen,
+                                                         ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure,
+                                                         ObjToScanQueueSet* task_queues,
+                                                         ParallelTaskTerminator* terminator)
+  : _par_scan_state(par_scan_state),
+    _to_space_closure(to_space_closure),
+    _old_gen_closure(old_gen_closure),
+    _to_space_root_closure(to_space_root_closure),
+    _old_gen_root_closure(old_gen_root_closure),
+    _par_gen(par_gen),
+    _task_queues(task_queues),
+    _terminator(terminator) {
+}
 
 void ParEvacuateFollowersClosure::do_void() {
   ObjToScanQueue* work_q = par_scan_state()->work_queue();
 
   while (true) {
     // Scan to-space and old-gen objs until we run out of both.
     oop obj_to_scan;
     par_scan_state()->trim_queues(0);
 
     // We have no local work, attempt to steal from other threads.
@@ -576,22 +572,24 @@
          "Broken overflow list?");
   // Finish the last termination pause.
   par_scan_state()->end_term_time();
 }
 
-ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
-                             HeapWord* young_old_boundary,
-                             ParScanThreadStateSet* state_set) :
-    AbstractGangTask("ParNewGeneration collection"),
-    _gen(gen), _next_gen(next_gen),
-    _young_old_boundary(young_old_boundary),
-    _state_set(state_set)
-  {}
+ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
+                             Generation* old_gen,
+                             HeapWord* young_old_boundary,
+                             ParScanThreadStateSet* state_set)
+  : AbstractGangTask("ParNewGeneration collection"),
+    _young_gen(young_gen),
+    _old_gen(old_gen),
+    _young_old_boundary(young_old_boundary),
+    _state_set(state_set) {
+}
 
 // Reset the terminator for the given number of
 // active threads.
 void ParNewGenTask::set_for_termination(int active_workers) {
-  _state_set->reset(active_workers, _gen->promotion_failed());
+  _state_set->reset(active_workers, _young_gen->promotion_failed());
   // Should the heap be passed in?  There's only 1 for now so
   // grab it instead.
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   gch->set_n_termination(active_workers);
 }
@@ -600,14 +598,12 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   // Since this is being done in a separate thread, need new resource
   // and handle marks.
   ResourceMark rm;
   HandleMark hm;
   // We would need multiple old-gen queues otherwise.
-  assert(gch->n_gens() == 2,
-         "Par young collection currently only works with one older gen.");
-
-  Generation* old_gen = gch->next_gen(_gen);
+  assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");
+  Generation* old_gen = gch->old_gen();
   ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
   assert(_state_set->is_valid(worker_id), "Should not have been called");
 
   par_scan_state.set_young_old_boundary(_young_old_boundary);
@@ -617,11 +613,11 @@
   CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                            &par_scan_state.to_space_root_closure(),
                                            false);
 
   par_scan_state.start_strong_roots();
-  gch->gen_process_roots(_gen->level(),
+  gch->gen_process_roots(Generation::Young,
                          true,  // Process younger gens, if any,
                                 // as strong roots.
                          false, // no scope; this is parallel code
                          SharedHeap::SO_ScavengeCodeCache,
                          GenCollectedHeap::StrongAndWeakRoots,
@@ -638,16 +634,15 @@
 #ifdef _MSC_VER
 #pragma warning( push )
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif
 ParNewGeneration::
-ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
-  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
-  _overflow_list(NULL),
-  _is_alive_closure(this),
-  _plab_stats(YoungPLABSize, PLABWeight)
-{
+ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
+  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
+    _overflow_list(NULL),
+    _is_alive_closure(this),
+    _plab_stats(YoungPLABSize, PLABWeight) {
   NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
   NOT_PRODUCT(_num_par_pushes = 0;)
   _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
   guarantee(_task_queues != NULL, "task_queues allocation failure.");
@@ -655,16 +650,16 @@
     ObjToScanQueue *q = new ObjToScanQueue();
     guarantee(q != NULL, "work_queue Allocation failure.");
     _task_queues->register_queue(i1, q);
   }
-  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
+  for (uint i2 = 0; i2 < ParallelGCThreads; i2++) {
     _task_queues->queue(i2)->initialize();
+  }
 
   _overflow_stacks = NULL;
   if (ParGCUseLocalOverflow) {
-
     // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
     // with ','
     typedef Stack<oop, mtGC> GCOopStack;
 
     _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
@@ -686,12 +681,13 @@
 #ifdef _MSC_VER
 #pragma warning( pop )
 #endif
 
 // ParNewGeneration::
-ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
-  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}
+ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl)
+  : DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {
+}
 
 template <class T>
 void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
 #ifdef ASSERT
   {
@@ -713,12 +709,13 @@
 void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
 void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }
 
 // ParNewGeneration::
-KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
-  DefNewGeneration::KeepAliveClosure(cl) {}
+KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl)
+  : DefNewGeneration::KeepAliveClosure(cl) {
+}
 
 template <class T>
 void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
 #ifdef ASSERT
   {
@@ -766,43 +763,41 @@
 class ParNewRefProcTaskProxy: public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
 public:
   ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
-                         Generation& next_gen,
+                         Generation& old_gen,
                          HeapWord* young_old_boundary,
                          ParScanThreadStateSet& state_set);
 private:
   virtual void work(uint worker_id);
   virtual void set_for_termination(int active_workers) {
     _state_set.terminator()->reset_for_reuse(active_workers);
   }
 private:
-  ParNewGeneration&      _gen;
+  ParNewGeneration&      _young_gen;
   ProcessTask&           _task;
-  Generation&            _next_gen;
+  Generation&            _old_gen;
   HeapWord*              _young_old_boundary;
   ParScanThreadStateSet& _state_set;
 };
 
-ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
-    ProcessTask& task, ParNewGeneration& gen,
-    Generation& next_gen,
-    HeapWord* young_old_boundary,
-    ParScanThreadStateSet& state_set)
+ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
+                                               ParNewGeneration& young_gen,
+                                               Generation& old_gen,
+                                               HeapWord* young_old_boundary,
+                                               ParScanThreadStateSet& state_set)
   : AbstractGangTask("ParNewGeneration parallel reference processing"),
-    _gen(gen),
+    _young_gen(young_gen),
     _task(task),
-    _next_gen(next_gen),
+    _old_gen(old_gen),
     _young_old_boundary(young_old_boundary),
-    _state_set(state_set)
-{
-}
+    _state_set(state_set) {
+}
 
-void ParNewRefProcTaskProxy::work(uint worker_id)
-{
+void ParNewRefProcTaskProxy::work(uint worker_id) {
   ResourceMark rm;
   HandleMark hm;
   ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
   par_scan_state.set_young_old_boundary(_young_old_boundary);
   _task.work(worker_id, par_scan_state.is_alive_closure(),
@@ -815,80 +810,79 @@
   EnqueueTask& _task;
 
 public:
   ParNewRefEnqueueTaskProxy(EnqueueTask& task)
     : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
-      _task(task)
-  { }
+      _task(task) {
+  }
 
-  virtual void work(uint worker_id)
-  {
+  virtual void work(uint worker_id) {
     _task.work(worker_id);
   }
 };
 
-void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
-{
+void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
          "not a generational heap");
   FlexibleWorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   _state_set.reset(workers->active_workers(), _generation.promotion_failed());
-  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
+  ParNewRefProcTaskProxy rp_task(task, _generation, *(gch->old_gen()),
                                  _generation.reserved().end(), _state_set);
   workers->run_task(&rp_task);
   _state_set.reset(0 /* bad value in debug if not reset */,
                    _generation.promotion_failed());
 }
 
-void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
-{
+void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   FlexibleWorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   ParNewRefEnqueueTaskProxy enq_task(task);
   workers->run_task(&enq_task);
 }
 
-void ParNewRefProcTaskExecutor::set_single_threaded_mode()
-{
+void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
   _state_set.flush();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   gch->set_par_threads(0);  // 0 ==> non-parallel.
   gch->save_marks();
 }
 
-ScanClosureWithParBarrier::
-ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
-  ScanClosure(g, gc_barrier) {}
+ScanClosureWithParBarrier::ScanClosureWithParBarrier(ParNewGeneration* g,
+                                                     bool gc_barrier)
+  : ScanClosure(g, gc_barrier) {
+}
 
 EvacuateFollowersClosureGeneral::
-EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level, OopsInGenClosure* cur,
-                                OopsInGenClosure* older) :
-  _gch(gch), _level(level), _scan_cur_or_nonheap(cur), _scan_older(older) {}
+EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
+                                OopsInGenClosure* cur,
+                                OopsInGenClosure* older)
+  : _gch(gch),
+    _scan_cur_or_nonheap(cur),
+    _scan_older(older) {
+}
 
 void EvacuateFollowersClosureGeneral::do_void() {
   do {
     // Beware: this call will lead to closure applications via virtual
     // calls.
-    _gch->oop_since_save_marks_iterate(_level,
+    _gch->oop_since_save_marks_iterate(Generation::Young,
                                        _scan_cur_or_nonheap,
                                        _scan_older);
-  } while (!_gch->no_allocs_since_save_marks(_level));
+  } while (!_gch->no_allocs_since_save_marks(true /* include_young */));
 }
 
 // A Generation that does parallel young-gen collection.
 
 bool ParNewGeneration::_avoid_promotion_undo = false;
 
-void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
+void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch,
+                                               ParScanThreadStateSet& thread_state_set,
+                                               ParNewTracer& gc_tracer) {
   assert(_promo_failure_scan_stack.is_empty(), "post condition");
   _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
   remove_forwarding_pointers();
   if (PrintGCDetails) {
@@ -897,11 +891,11 @@
   // All the spaces are in play for mark-sweep.
   swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
   from()->set_next_compaction_space(to());
   gch->set_incremental_collection_failed();
   // Inform the next generation that a promotion failure occurred.
-  _next_gen->promotion_failure_occurred();
+  _old_gen->promotion_failure_occurred();
 
   // Trace promotion failure in the parallel GC threads
   thread_state_set.trace_promotion_failed(gc_tracer);
   // Single threaded code may have reported promotion failure to the global state
   if (_promotion_failed_info.has_failed()) {
@@ -929,13 +923,11 @@
   int active_workers =
       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                               workers->active_workers(),
                                               Threads::number_of_non_daemon_threads());
   workers->set_active_workers(active_workers);
-  assert(gch->n_gens() == 2,
-         "Par collection currently only works with single older gen.");
-  _next_gen = gch->next_gen(this);
+  _old_gen = gch->old_gen();
 
   // Do we have to avoid promotion_undo?
   if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
     set_avoid_promotion_undo(true);
   }
@@ -977,14 +969,14 @@
   // Always set the terminator for the active number of workers
   // because only those workers go through the termination protocol.
   ParallelTaskTerminator _term(n_workers, task_queues());
   ParScanThreadStateSet thread_state_set(workers->active_workers(),
-                                         *to(), *this, *_next_gen, *task_queues(),
+                                         *to(), *this, *_old_gen, *task_queues(),
                                          _overflow_stacks, desired_plab_sz(), _term);
 
-  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
+  ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set);
   gch->set_par_threads(n_workers);
   gch->rem_set()->prepare_for_younger_refs_iterate(true);
   // It turns out that even when we're using 1 thread, doing the work in a
   // separate thread causes wide variance in run times.  We can't help this
   // in the multi-threaded case, but we special-case n=1 here to get
@@ -1005,11 +997,11 @@
   ScanWeakRefClosure scan_weak_ref(this);
   KeepAliveClosure keep_alive(&scan_weak_ref);
   ScanClosure scan_without_gc_barrier(this, false);
   ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
-  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
-      &scan_without_gc_barrier, &scan_with_gc_barrier);
+  EvacuateFollowersClosureGeneral evacuate_followers(gch,
+      &scan_without_gc_barrier, &scan_with_gc_barrier);
   rp->setup_policy(clear_all_soft_refs);
   // Can the mt_degree be set later (at run_task() time would be best)?
   rp->set_active_mt_degree(active_workers);
   ReferenceProcessorStats stats;
@@ -1194,12 +1186,11 @@
     if (forward_ptr != NULL) {
       // someone else beat us to it.
       return real_forwardee(old);
     }
 
-    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
-                                     old, m, sz);
+    new_obj = _old_gen->par_promote(par_scan_state->thread_num(), old, m, sz);
 
     if (new_obj == NULL) {
       // promotion failed, forward to self
       _promotion_failed = true;
       new_obj = old;
@@ -1226,11 +1217,14 @@
   // This code must come after the CAS test, or it will print incorrect
   // information.
   if (TraceScavenge) {
     gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                            is_in_reserved(new_obj) ? "copying" : "tenuring",
-                           new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
+                           new_obj->klass()->internal_name(),
+                           (void *)old,
+                           (void *)new_obj,
+                           new_obj->size());
   }
 #endif
 
   if (forward_ptr == NULL) {
     oop obj_to_push = new_obj;
@@ -1312,11 +1306,11 @@
   }
 
   if (new_obj == NULL) {
     // Either to-space is full or we decided to promote
     // try allocating obj tenured
-    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
+    new_obj = _old_gen->par_promote(par_scan_state->thread_num(),
                                      old, m, sz);
 
     if (new_obj == NULL) {
       // promotion failed, forward to self
       forward_ptr = old->forward_to_atomic(old);
@@ -1347,11 +1341,14 @@
   // This code must come after the CAS test, or it will print incorrect
   // information.
   if (TraceScavenge) {
     gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                            is_in_reserved(new_obj) ? "copying" : "tenuring",
-                           new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
+                           new_obj->klass()->internal_name(),
+                           (void*)old,
+                           (void*)new_obj,
+                           new_obj->size());
   }
 #endif
 
   // Now attempt to install the forwarding pointer (atomically).
   // We have to copy the mark word before overwriting with forwarding
@@ -1393,11 +1390,11 @@
       // Must be in to_space.
       assert(to()->is_in_reserved(new_obj), "Checking");
       par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
     } else {
       assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
-      _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
+      _old_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
                                         (HeapWord*)new_obj, sz);
     }
 
   return forward_ptr;
 }
@@ -1509,11 +1506,13 @@
   size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                  (size_t)ParGCDesiredObjsFromOverflowList);
 
   assert(!UseCompressedOops, "Error");
   assert(par_scan_state->overflow_stack() == NULL, "Error");
-  if (_overflow_list == NULL) return false;
+  if (_overflow_list == NULL) {
+    return false;
+  }
 
   // Otherwise, there was something there; try claiming the list.
   oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
   // Trim off a prefix of at most objsFromOverflow items
   Thread* tid = Thread::current();
