--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	2018-09-13 13:42:18.235095667 +0200
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	2018-09-13 13:42:17.935082561 +0200
@@ -2488,7 +2488,6 @@
 
   // Fill TLAB's and such
   double start = os::elapsedTime();
-  accumulate_statistics_all_tlabs();
   ensure_parsability(true);
   g1_policy()->phase_times()->record_prepare_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
 }
--- old/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	2018-09-13 13:42:18.685115326 +0200
+++ new/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	2018-09-13 13:42:18.387102307 +0200
@@ -488,10 +488,6 @@
   return result;
 }
 
-void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
-  CollectedHeap::accumulate_statistics_all_tlabs();
-}
-
 void ParallelScavengeHeap::resize_all_tlabs() {
   CollectedHeap::resize_all_tlabs();
 }
--- old/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	2018-09-13 13:42:19.086132845 +0200
+++ new/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	2018-09-13 13:42:18.785119695 +0200
@@ -206,7 +206,6 @@
   HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
 
   void ensure_parsability(bool retire_tlabs);
-  void accumulate_statistics_all_tlabs();
   void resize_all_tlabs();
 
   bool supports_tlab_allocation() const { return true; }
--- old/src/hotspot/share/gc/parallel/psMarkSweep.cpp	2018-09-13 13:42:19.481150102 +0200
+++ new/src/hotspot/share/gc/parallel/psMarkSweep.cpp	2018-09-13 13:42:19.180136952 +0200
@@ -150,7 +150,6 @@
   heap->trace_heap_before_gc(_gc_tracer);
 
   // Fill in TLABs
-  heap->accumulate_statistics_all_tlabs();
   heap->ensure_parsability(true);  // retire TLABs
 
   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
--- old/src/hotspot/share/gc/parallel/psParallelCompact.cpp	2018-09-13 13:42:19.875167315 +0200
+++ new/src/hotspot/share/gc/parallel/psParallelCompact.cpp	2018-09-13 13:42:19.573154121 +0200
@@ -972,7 +972,6 @@
   heap->trace_heap_before_gc(&_gc_tracer);
 
   // Fill in TLABs
-  heap->accumulate_statistics_all_tlabs();
   heap->ensure_parsability(true);  // retire TLABs
 
   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
--- old/src/hotspot/share/gc/parallel/psScavenge.cpp	2018-09-13 13:42:20.299185838 +0200
+++ new/src/hotspot/share/gc/parallel/psScavenge.cpp	2018-09-13 13:42:19.999172732 +0200
@@ -279,7 +279,6 @@
   assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
 
   // Fill in TLABs
-  heap->accumulate_statistics_all_tlabs();
   heap->ensure_parsability(true);  // retire TLABs
 
   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
--- old/src/hotspot/share/gc/shared/collectedHeap.cpp	2018-09-13 13:42:20.696203182 +0200
+++ new/src/hotspot/share/gc/shared/collectedHeap.cpp	2018-09-13 13:42:20.398190163 +0200
@@ -476,35 +476,31 @@
   // started allocating (nothing much to verify) or we have
   // started allocating but are now a full-fledged JavaThread
   // (and have thus made our TLAB's) available for filling.
-  assert(SafepointSynchronize::is_at_safepoint() ||
-         !is_init_completed(),
+  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
          "Should only be called at a safepoint or at start-up"
          " otherwise concurrent mutator activity may make heap "
          " unparsable again");
-  const bool use_tlab = UseTLAB;
+
+  if (UseTLAB && retire_tlabs) {
+    // Accumulate statistics before retiring
+    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
+  }
+
   // The main thread starts allocating via a TLAB even before it
   // has added itself to the threads list at vm boot-up.
   JavaThreadIteratorWithHandle jtiwh;
-  assert(!use_tlab || jtiwh.length() > 0,
+  assert(jtiwh.length() > 0,
          "Attempt to fill tlabs before main thread has been added"
          " to threads list is doomed to failure!");
   BarrierSet *bs = BarrierSet::barrier_set();
   for (; JavaThread *thread = jtiwh.next(); ) {
-    if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
+    if (UseTLAB) {
+      thread->tlab().make_parsable(retire_tlabs);
+    }
     bs->make_parsable(thread);
   }
 }
 
-void CollectedHeap::accumulate_statistics_all_tlabs() {
-  if (UseTLAB) {
-    assert(SafepointSynchronize::is_at_safepoint() ||
-           !is_init_completed(),
-           "should only accumulate statistics on tlabs at safepoint");
-
-    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
-  }
-}
-
 void CollectedHeap::resize_all_tlabs() {
   if (UseTLAB) {
     assert(SafepointSynchronize::is_at_safepoint() ||
--- old/src/hotspot/share/gc/shared/collectedHeap.hpp	2018-09-13 13:42:21.100220832 +0200
+++ new/src/hotspot/share/gc/shared/collectedHeap.hpp	2018-09-13 13:42:20.799207682 +0200
@@ -137,9 +137,6 @@
                                       size_t requested_size,
                                       size_t* actual_size);
 
-  // Accumulate statistics on all tlabs.
-  virtual void accumulate_statistics_all_tlabs();
-
   // Reinitialize tlabs before resuming mutators.
   virtual void resize_all_tlabs();
 
--- old/src/hotspot/share/gc/shared/genCollectedHeap.cpp	2018-09-13 13:42:21.496238132 +0200
+++ new/src/hotspot/share/gc/shared/genCollectedHeap.cpp	2018-09-13 13:42:21.198225113 +0200
@@ -1293,7 +1293,6 @@
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
 
   // Fill TLAB's and such
-  CollectedHeap::accumulate_statistics_all_tlabs();
   ensure_parsability(true);   // retire TLABs
 
   // Walk generations
--- old/src/hotspot/share/gc/z/zCollectedHeap.hpp	2018-09-13 13:42:21.905256001 +0200
+++ new/src/hotspot/share/gc/z/zCollectedHeap.hpp	2018-09-13 13:42:21.606242938 +0200
@@ -57,7 +57,6 @@
   static ZCollectedHeap* heap();
 
   using CollectedHeap::ensure_parsability;
-  using CollectedHeap::accumulate_statistics_all_tlabs;
   using CollectedHeap::resize_all_tlabs;
 
   ZCollectedHeap(ZCollectorPolicy* policy);
--- old/src/hotspot/share/gc/z/zObjectAllocator.cpp	2018-09-13 13:42:22.297273126 +0200
+++ new/src/hotspot/share/gc/z/zObjectAllocator.cpp	2018-09-13 13:42:21.999260107 +0200
@@ -300,7 +300,6 @@
   // Retire TLABs
   if (UseTLAB) {
     ZCollectedHeap* heap = ZCollectedHeap::heap();
-    heap->accumulate_statistics_all_tlabs();
     heap->ensure_parsability(true /* retire_tlabs */);
     heap->resize_all_tlabs();
   }
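Note on the net effect of this patch: the old protocol required every GC prologue to pair two calls, accumulate_statistics_all_tlabs() followed by ensure_parsability(true), and the patch folds the statistics step into ensure_parsability() itself, so a call site can no longer retire TLABs while forgetting to accumulate their statistics. The standalone C++ sketch below illustrates the shape of the refactoring using hypothetical stand-in names (GCHeap, TLABStats, use_tlab); it is a simplified model of the call protocol, not HotSpot code.

#include <cstdio>

static bool use_tlab = true;  // stand-in for the UseTLAB flag

struct TLABStats {
  // Stand-in for ThreadLocalAllocBuffer::accumulate_statistics_before_gc().
  // Before the patch, every GC prologue had to invoke this explicitly.
  static void accumulate_before_gc() {
    std::printf("accumulating TLAB statistics\n");
  }
};

struct GCHeap {
  // After the patch, retirement implies statistics accumulation:
  // the pre-step runs exactly when TLABs are in use and being retired.
  void ensure_parsability(bool retire_tlabs) {
    if (use_tlab && retire_tlabs) {
      TLABStats::accumulate_before_gc();  // subsumes the removed call
    }
    // ... make each thread's TLAB parsable, retiring it if requested ...
  }
};

int main() {
  GCHeap heap;
  // A GC prologue now needs only this single call.
  heap.ensure_parsability(true /* retire_tlabs */);
  return 0;
}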