src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

*** 67,76 ****
--- 67,77 ----
                                         Generation* old_gen_,
                                         int thread_num_,
                                         ObjToScanQueueSet* work_queue_set_,
                                         Stack<oop, mtGC>* overflow_stacks_,
                                         size_t desired_plab_sz_,
+                                        ParNewTracer* gc_tracer,
                                         ParallelTaskTerminator& term_) :
    _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
    _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
    _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
    _ageTable(false), // false ==> not the global age table, no perf data.
*** 81,90 ****
--- 82,92 ----
    _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                        &_to_space_root_closure, gen_, &_old_gen_root_closure,
                        work_queue_set_, &term_),
    _is_alive_closure(gen_),
    _scan_weak_ref_closure(gen_, this),
    _keep_alive_closure(&_scan_weak_ref_closure),
+   _gc_tracer(gc_tracer),
    _strong_roots_time(0.0), _term_time(0.0)
  {
  #if TASKQUEUE_STATS
    _term_attempts = 0;
    _overflow_refills = 0;
*** 220,230 ****
    assert(ParGCUseLocalOverflow, "Else should not call");
    overflow_stack()->push(p);
    assert(young_gen()->overflow_list() == NULL, "Error");
  }

! HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {

    // Otherwise, if the object is small enough, try to reallocate the
    // buffer.
    HeapWord* obj = NULL;
    if (!_to_space_full) {
--- 222,234 ----
    assert(ParGCUseLocalOverflow, "Else should not call");
    overflow_stack()->push(p);
    assert(young_gen()->overflow_list() == NULL, "Error");
  }

! HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz,
!                                                      const oop old,
!                                                      const uint age) {

    // Otherwise, if the object is small enough, try to reallocate the
    // buffer.
    HeapWord* obj = NULL;
    if (!_to_space_full) {
*** 250,259 ****
--- 254,265 ----
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
+       gc_tracer()->report_promotion_in_new_plab_event(old, word_sz, age, false,
+                                                       buf_size);
        obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
*** 265,274 ****
--- 271,281 ----
          _to_space_full = true;
        }

      } else {
        // Too large; allocate the object individually.
+       gc_tracer()->report_promotion_outside_plab_event(old, word_sz, age, false);
        obj = sp->par_allocate(word_sz);
      }
    }
    return obj;
  }
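
Taken together, the three hunks above instrument both outcomes of the slow-path to-space allocation: a successful PLAB refill is reported via report_promotion_in_new_plab_event, while an object too large for a PLAB is allocated individually and reported via report_promotion_outside_plab_event. The standalone sketch below models only that branch structure; ToyTracer, ToyPlab, and the size policy are hypothetical stand-ins, not HotSpot types or APIs.

    // Standalone model of the instrumented slow path (hypothetical types;
    // only the branch structure mirrors the patched alloc_in_to_space_slow).
    #include <cstdio>
    #include <cstddef>

    struct ToyTracer {
      // Stand-ins for the report_promotion_{in_new_plab,outside_plab}_event calls.
      void report_in_new_plab(size_t word_sz, unsigned age, size_t plab_sz) {
        std::printf("in-new-plab: obj=%zu words, age=%u, plab=%zu words\n",
                    word_sz, age, plab_sz);
      }
      void report_outside_plab(size_t word_sz, unsigned age) {
        std::printf("outside-plab: obj=%zu words, age=%u\n", word_sz, age);
      }
    };

    struct ToyPlab {
      size_t remaining = 0;
      size_t refill(size_t sz) { remaining = sz; return sz; }  // new buffer
      bool allocate(size_t word_sz) {
        if (word_sz > remaining) return false;
        remaining -= word_sz;
        return true;
      }
    };

    // Small objects trigger a PLAB refill (reported as a new-PLAB event);
    // oversized objects bypass the PLAB (reported as an outside-PLAB event).
    bool alloc_in_to_space_slow(ToyPlab& plab, ToyTracer& tracer,
                                size_t word_sz, unsigned age,
                                size_t plab_word_sz) {
      if (word_sz <= plab_word_sz / 2) {        // small enough: refill the buffer
        size_t buf_sz = plab.refill(plab_word_sz);
        tracer.report_in_new_plab(word_sz, age, buf_sz);
        return plab.allocate(word_sz);
      }
      tracer.report_outside_plab(word_sz, age); // too large: allocate individually
      return true;                              // assume the direct allocation succeeded
    }

    int main() {
      ToyPlab plab;
      ToyTracer tracer;
      alloc_in_to_space_slow(plab, tracer, /*word_sz=*/8,    /*age=*/2, /*plab=*/1024);
      alloc_in_to_space_slow(plab, tracer, /*word_sz=*/4096, /*age=*/2, /*plab=*/1024);
      return 0;
    }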
*** 301,310 ****
--- 308,318 ----
                          ParNewGeneration& gen,
                          Generation& old_gen,
                          ObjToScanQueueSet& queue_set,
                          Stack<oop, mtGC>* overflow_stacks_,
                          size_t desired_plab_sz,
+                         ParNewTracer* gc_tracer,
                          ParallelTaskTerminator& term);

    ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

    inline ParScanThreadState& thread_state(int i);
*** 335,356 ****
  ParScanThreadStateSet::ParScanThreadStateSet(
    int num_threads, Space& to_space, ParNewGeneration& gen,
    Generation& old_gen, ObjToScanQueueSet& queue_set,
    Stack<oop, mtGC>* overflow_stacks,
!   size_t desired_plab_sz, ParallelTaskTerminator& term)
    : ResourceArray(sizeof(ParScanThreadState), num_threads),
      _gen(gen), _next_gen(old_gen), _term(term)
  {
    assert(num_threads > 0, "sanity check!");
    assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
           "overflow_stack allocation mismatch");
    // Initialize states.
    for (int i = 0; i < num_threads; ++i) {
      new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
!                          overflow_stacks, desired_plab_sz, term);
    }
  }

  inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
  {
--- 343,365 ----
  ParScanThreadStateSet::ParScanThreadStateSet(
    int num_threads, Space& to_space, ParNewGeneration& gen,
    Generation& old_gen, ObjToScanQueueSet& queue_set,
    Stack<oop, mtGC>* overflow_stacks,
!   size_t desired_plab_sz, ParNewTracer* gc_tracer,
!   ParallelTaskTerminator& term)
    : ResourceArray(sizeof(ParScanThreadState), num_threads),
      _gen(gen), _next_gen(old_gen), _term(term)
  {
    assert(num_threads > 0, "sanity check!");
    assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
           "overflow_stack allocation mismatch");
    // Initialize states.
    for (int i = 0; i < num_threads; ++i) {
      new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
!                          overflow_stacks, desired_plab_sz, gc_tracer, term);
    }
  }

  inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
  {
*** 978,988 ****
    // Always set the terminator for the active number of workers
    // because only those workers go through the termination protocol.
    ParallelTaskTerminator _term(n_workers, task_queues());
    ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                           *to(), *this, *_next_gen, *task_queues(),
!                                          _overflow_stacks, desired_plab_sz(), _term);

    ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
    gch->set_par_threads(n_workers);
    gch->rem_set()->prepare_for_younger_refs_iterate(true);
    // It turns out that even when we're using 1 thread, doing the work in a
--- 987,998 ----
    // Always set the terminator for the active number of workers
    // because only those workers go through the termination protocol.
    ParallelTaskTerminator _term(n_workers, task_queues());
    ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                           *to(), *this, *_next_gen, *task_queues(),
!                                          _overflow_stacks, desired_plab_sz(),
!                                          &gc_tracer, _term);

    ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
    gch->set_par_threads(n_workers);
    gch->rem_set()->prepare_for_younger_refs_iterate(true);
    // It turns out that even when we're using 1 thread, doing the work in a
*** 1176,1186 ****
    oop new_obj = NULL;
    oop forward_ptr;

    // Try allocating obj in to-space (unless too old)
    if (dummyOld.age() < tenuring_threshold()) {
!     new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
      if (new_obj == NULL) {
        set_survivor_overflow(true);
      }
    }
--- 1186,1196 ----
    oop new_obj = NULL;
    oop forward_ptr;

    // Try allocating obj in to-space (unless too old)
    if (dummyOld.age() < tenuring_threshold()) {
!     new_obj = (oop)par_scan_state->alloc_in_to_space(sz, old, dummyOld.age());
      if (new_obj == NULL) {
        set_survivor_overflow(true);
      }
    }
*** 1194,1204 ****
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

!   new_obj = _next_gen->par_promote(par_scan_state->thread_num(), old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
--- 1204,1215 ----
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

!   new_obj = _next_gen->par_promote(par_scan_state->gc_tracer(),
!                                    par_scan_state->thread_num(), old, m, sz);

    if (new_obj == NULL) {
      // promotion failed, forward to self
      _promotion_failed = true;
*** 1303,1322 ****
    oop new_obj = NULL;
    oop forward_ptr;

    // Try allocating obj in to-space (unless too old)
    if (dummyOld.age() < tenuring_threshold()) {
!     new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
      if (new_obj == NULL) {
        set_survivor_overflow(true);
      }
    }

    if (new_obj == NULL) {
      // Either to-space is full or we decided to promote
      // try allocating obj tenured
!     new_obj = _next_gen->par_promote(par_scan_state->thread_num(), old, m, sz);

      if (new_obj == NULL) {
        // promotion failed, forward to self
        forward_ptr = old->forward_to_atomic(old);
--- 1314,1334 ----
    oop new_obj = NULL;
    oop forward_ptr;

    // Try allocating obj in to-space (unless too old)
    if (dummyOld.age() < tenuring_threshold()) {
!     new_obj = (oop)par_scan_state->alloc_in_to_space(sz, old, dummyOld.age());
      if (new_obj == NULL) {
        set_survivor_overflow(true);
      }
    }

    if (new_obj == NULL) {
      // Either to-space is full or we decided to promote
      // try allocating obj tenured
!     new_obj = _next_gen->par_promote(par_scan_state->gc_tracer(),
!                                      par_scan_state->thread_num(), old, m, sz);

      if (new_obj == NULL) {
        // promotion failed, forward to self
        forward_ptr = old->forward_to_atomic(old);
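
The last three hunks thread the per-thread tracer into par_promote and the object's age into alloc_in_to_space in what appear to be the two copy-to-survivor-space variants, so that both the young-generation copy and the tenured promotion can emit promotion events. The sketch below is a minimal standalone model of the decision order only; the enum and function names are hypothetical and none of this is HotSpot API.

    // Standalone model of the copy/promote decision (hypothetical types;
    // only the decision order mirrors the patched code).
    #include <cstdio>

    enum class Where { ToSpace, Tenured, ForwardedToSelf };

    // Objects below the tenuring threshold are tried in to-space first; on
    // failure (or if too old) they are promoted to the tenured generation,
    // and if that also fails the object is forwarded to itself, which is
    // how the collector signals promotion failure.
    Where copy_or_promote(unsigned age, unsigned tenuring_threshold,
                          bool to_space_has_room, bool tenured_has_room) {
      if (age < tenuring_threshold && to_space_has_room) {
        return Where::ToSpace;        // copied within the young generation
      }
      if (tenured_has_room) {
        return Where::Tenured;        // par_promote succeeded
      }
      return Where::ForwardedToSelf;  // promotion failed
    }

    int main() {
      std::printf("%d\n", (int)copy_or_promote(3, 15, true,  true));   // ToSpace
      std::printf("%d\n", (int)copy_or_promote(15, 15, true, true));   // Tenured
      std::printf("%d\n", (int)copy_or_promote(15, 15, true, false));  // ForwardedToSelf
      return 0;
    }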