--- old/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	2014-10-09 13:30:58.481575201 +0200
+++ new/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	2014-10-09 13:30:58.381578696 +0200
@@ -149,18 +149,15 @@
   check_free_list_consistency();
 
   // Initialize locks for parallel case.
-
-  if (CollectedHeap::use_parallel_gc_threads()) {
-    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
-                                              "a freelist par lock",
-                                              true);
-      DEBUG_ONLY(
-        _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
-      )
-    }
-    _dictionary->set_par_lock(&_parDictionaryAllocLock);
+  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
+    _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
+                                            "a freelist par lock",
+                                            true);
+    DEBUG_ONLY(
+      _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
+    )
   }
+  _dictionary->set_par_lock(&_parDictionaryAllocLock);
 }
 
 // Like CompactibleSpace forward() but always calls cross_threshold() to
@@ -622,17 +619,11 @@
       // Mark the boundary of the new block in BOT
       _bt.mark_block(prevEnd, value);
       // put it all in the linAB
-      if (ParallelGCThreads > 1) {
-        MutexLockerEx x(parDictionaryAllocLock(),
-                        Mutex::_no_safepoint_check_flag);
-        _smallLinearAllocBlock._ptr = prevEnd;
-        _smallLinearAllocBlock._word_size = newFcSize;
-        repairLinearAllocBlock(&_smallLinearAllocBlock);
-      } else {
-        _smallLinearAllocBlock._ptr = prevEnd;
-        _smallLinearAllocBlock._word_size = newFcSize;
-        repairLinearAllocBlock(&_smallLinearAllocBlock);
-      }
+      MutexLockerEx x(parDictionaryAllocLock(),
+                      Mutex::_no_safepoint_check_flag);
+      _smallLinearAllocBlock._ptr = prevEnd;
+      _smallLinearAllocBlock._word_size = newFcSize;
+      repairLinearAllocBlock(&_smallLinearAllocBlock);
       // Births of chunks put into a LinAB are not recorded. Births
       // of chunks as they are allocated out of a LinAB are.
     } else {
@@ -1740,10 +1731,7 @@
   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
   // One of the parallel gc task threads may be here
   // whilst others are allocating.
-  Mutex* lock = NULL;
-  if (CollectedHeap::use_parallel_gc_threads()) {
-    lock = &_parDictionaryAllocLock;
-  }
+  Mutex* lock = &_parDictionaryAllocLock;
   FreeChunk* ec;
   {
     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
@@ -1760,7 +1748,7 @@
   }
   ec->set_size(size);
   debug_only(ec->mangleFreed(size));
-  if (size < SmallForDictionary && CollectedHeap::use_parallel_gc_threads()) {
+  if (size < SmallForDictionary) {
     lock = _indexedFreeListParLocks[size];
   }
   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
--- old/src/share/vm/gc_implementation/g1/concurrentMark.cpp	2014-10-09 13:30:59.077554380 +0200
+++ new/src/share/vm/gc_implementation/g1/concurrentMark.cpp	2014-10-09 13:30:58.977557873 +0200
@@ -660,7 +660,6 @@
     gclog_or_tty->print_cr("CL Sleep Factor %1.4lf", cleanup_sleep_factor());
 #endif
 
-    guarantee(parallel_marking_threads() > 0, "peace of mind");
     _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
          _max_parallel_marking_threads, false, true);
     if (_parallel_workers == NULL) {
@@ -1225,12 +1224,8 @@
   uint active_workers = MAX2(1U, parallel_marking_threads());
 
   CMRootRegionScanTask task(this);
-  if (use_parallel_marking_threads()) {
-    _parallel_workers->set_active_workers((int) active_workers);
-    _parallel_workers->run_task(&task);
-  } else {
-    task.work(0);
-  }
+  _parallel_workers->set_active_workers(active_workers);
+  _parallel_workers->run_task(&task);
 
   // It's possible that has_aborted() is true here without actually
   // aborting the survivor scan earlier. This is OK as it's
@@ -1261,15 +1256,11 @@
   set_concurrency_and_phase(active_workers, true /* concurrent */);
 
   CMConcurrentMarkingTask markingTask(this, cmThread());
-  if (use_parallel_marking_threads()) {
-    _parallel_workers->set_active_workers((int)active_workers);
-    // Don't set _n_par_threads because it affects MT in process_roots()
-    // and the decisions on that MT processing is made elsewhere.
-    assert(_parallel_workers->active_workers() > 0, "Should have been set");
-    _parallel_workers->run_task(&markingTask);
-  } else {
-    markingTask.work(0);
-  }
+  _parallel_workers->set_active_workers(active_workers);
+  // Don't set _n_par_threads because it affects MT in process_roots()
+  // and the decisions on that MT processing is made elsewhere.
+  assert(_parallel_workers->active_workers() > 0, "Should have been set");
+  _parallel_workers->run_task(&markingTask);
   print_stats();
 }
 
@@ -3341,9 +3332,7 @@
 }
 
 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
-  if (use_parallel_marking_threads()) {
-    _parallel_workers->print_worker_threads_on(st);
-  }
+  _parallel_workers->print_worker_threads_on(st);
 }
 
 void ConcurrentMark::print_on_error(outputStream* st) const {
--- old/src/share/vm/gc_implementation/g1/concurrentMark.hpp	2014-10-09 13:30:59.709532300 +0200
+++ new/src/share/vm/gc_implementation/g1/concurrentMark.hpp	2014-10-09 13:30:59.601536073 +0200
@@ -519,15 +519,6 @@
   double cleanup_sleep_factor() { return _cleanup_sleep_factor; }
   double cleanup_task_overhead() { return _cleanup_task_overhead;}
 
-  bool use_parallel_marking_threads() const {
-    assert(parallel_marking_threads() <=
-           max_parallel_marking_threads(), "sanity");
-    assert((_parallel_workers == NULL && parallel_marking_threads() == 0) ||
-           parallel_marking_threads() > 0,
-           "parallel workers not set up correctly");
-    return _parallel_workers != NULL;
-  }
-
   HeapWord* finger() { return _finger; }
   bool concurrent() { return _concurrent; }
   uint active_tasks() { return _active_tasks; }
--- old/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	2014-10-09 13:31:00.257513154 +0200
+++ new/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	2014-10-09 13:31:00.161516508 +0200
@@ -43,6 +43,7 @@
     _hot_cache_idx = 0;
 
     // For refining the cards in the hot cache in parallel
+    _hot_cache_par_chunk_size = ClaimChunkSize;
    _hot_cache_par_claimed_idx = 0;
 
     _card_counts.initialize(card_counts_storage);
@@ -98,7 +99,7 @@
   int start_idx;
   while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
-    int end_idx = start_idx + ClaimChunkSize;
+    int end_idx = start_idx + _hot_cache_par_chunk_size;
 
     if (start_idx == Atomic::cmpxchg(end_idx,
                                      &_hot_cache_par_claimed_idx,
                                      start_idx)) {
--- old/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	2014-10-09 13:31:00.813493730 +0200
+++ new/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	2014-10-09 13:31:00.709497363 +0200
@@ -63,6 +63,7 @@
   int _n_hot;
   int _hot_cache_idx;
 
+  int _hot_cache_par_chunk_size;
   volatile int _hot_cache_par_claimed_idx;
 
   bool _use_cache;
--- old/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	2014-10-09 13:31:01.349475004 +0200
+++ new/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	2014-10-09 13:31:01.249478497 +0200
@@ -479,7 +479,7 @@
     _next_gen.par_oop_since_save_marks_iterate_done(i);
   }
 
-  if (UseConcMarkSweepGC && CollectedHeap::use_parallel_gc_threads()) {
+  if (UseConcMarkSweepGC) {
     // We need to call this even when ResizeOldPLAB is disabled
     // so as to avoid breaking some asserts. While we may be able
     // to avoid this by reorganizing the code a bit, I am loathe