--- old/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp 2020-08-24 12:25:34.052126044 +0200
+++ new/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp 2020-08-24 12:25:33.688124239 +0200
@@ -571,7 +571,8 @@
 void MutableNUMASpace::initialize(MemRegion mr,
                                   bool clear_space,
                                   bool mangle_space,
-                                  bool setup_pages) {
+                                  bool setup_pages,
+                                  WorkGang *pretouch_gang) {
   assert(clear_space, "Reallocation will destroy data!");
   assert(lgrp_spaces()->length() > 0, "There should be at least one space");
--- old/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp 2020-08-24 12:25:35.464133046 +0200
+++ new/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp 2020-08-24 12:25:35.096131221 +0200
@@ -195,7 +195,11 @@
   MutableNUMASpace(size_t alignment);
   virtual ~MutableNUMASpace();
   // Space initialization.
-  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space, bool setup_pages = SetupPages);
+  virtual void initialize(MemRegion mr,
+                          bool clear_space,
+                          bool mangle_space,
+                          bool setup_pages = SetupPages,
+                          WorkGang *pretouch_gang = NULL);
   // Update space layout if necessary. Do all adaptive resizing job.
   virtual void update();
   // Update allocation rate averages.
--- old/src/hotspot/share/gc/parallel/mutableSpace.cpp 2020-08-24 12:25:36.992140623 +0200
+++ new/src/hotspot/share/gc/parallel/mutableSpace.cpp 2020-08-24 12:25:36.624138798 +0200
@@ -60,14 +60,59 @@
   }
 }
 
-void MutableSpace::pretouch_pages(MemRegion mr) {
-  os::pretouch_memory(mr.start(), mr.end());
-}
+// Pre-touches a memory region in parallel: workers claim page-aligned chunks
+// from a shared cursor and touch every page of their claim.
+class PGCPretouchTask: public AbstractGangTask {
+  char * volatile _cur_addr;
+  char * const _start_addr;
+  char * const _end_addr;
+  size_t _page_size;
+  uint _total_workers;
+
+public:
+  PGCPretouchTask(MemRegion mr, size_t page_size) :
+    AbstractGangTask("ParallelGC PreTouch"),
+    _cur_addr((char*)mr.start()),
+    _start_addr((char*)mr.start()),
+    _end_addr((char*)mr.end()),
+    _page_size(0),
+    _total_workers(1) {
+#ifdef LINUX
+    // With transparent huge pages the region is touched in small-page steps;
+    // the kernel promotes the pages afterwards.
+    _page_size = UseTransparentHugePages ? (size_t)os::vm_page_size() : page_size;
+#else
+    _page_size = page_size;
+#endif
+  }
+
+  virtual void work(uint worker_id) {
+    // Every claim covers at least one page.
+    size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
+
+    // Keep claiming chunks until the whole region has been handed out.
+    while (true) {
+      char *touch_addr = Atomic::fetch_and_add(&_cur_addr, actual_chunk_size);
+      if (touch_addr >= _end_addr) {
+        return;
+      }
+      char *end_addr = MIN2(touch_addr + actual_chunk_size, _end_addr);
+      os::pretouch_memory(touch_addr, end_addr, _page_size);
+    }
+  }
+
+  void set_total_workers(uint total_workers) { _total_workers = total_workers; }
+
+  // Page-aligned share of the region per worker.
+  size_t chunk_size() { return align_down((_end_addr - _start_addr) / _total_workers, _page_size); }
+
+  static void pretouch_pages(MemRegion mr) {
+    os::pretouch_memory(mr.start(), mr.end());
+  }
+};
 
 void MutableSpace::initialize(MemRegion mr,
                               bool clear_space,
                               bool mangle_space,
-                              bool setup_pages) {
+                              bool setup_pages,
+                              WorkGang *pretouch_gang) {
   assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
          "invalid space boundaries");
@@ -114,8 +159,33 @@
   }
 
   if (AlwaysPreTouch) {
-    pretouch_pages(head);
-    pretouch_pages(tail);
+    size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
+    PGCPretouchTask pretouch_task(head, page_size);
+
+    if (pretouch_gang) {
+      uint num_workers = pretouch_gang->total_workers();
+      pretouch_task.set_total_workers(num_workers);
+
+      log_debug(gc, heap)("Running %s with %u workers for pre-touching " SIZE_FORMAT "B.",
+                          pretouch_task.name(), num_workers, head.byte_size());
+
+      pretouch_gang->run_task(&pretouch_task, num_workers);
+    } else {
+      // No work gang available: pre-touch the head on the current thread.
+      if (head.byte_size() != 0) {
+        log_debug(gc, heap)("Running %s with 1 thread for pre-touching " SIZE_FORMAT "B.",
+                            pretouch_task.name(), head.byte_size());
+        PGCPretouchTask::pretouch_pages(head);
+      }
+    }
+
+    // The tail is always pre-touched on the current thread.
+    if (tail.byte_size() != 0) {
+      log_debug(gc, heap)("Running %s with 1 thread for pre-touching " SIZE_FORMAT "B.",
+                          pretouch_task.name(), tail.byte_size());
+      PGCPretouchTask::pretouch_pages(tail);
+    }
   }
 
   // Remember where we stopped so that we can continue later.
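
The PGCPretouchTask added above distributes the region by handing out page-aligned chunks through an atomic cursor; each worker claims the next chunk and touches every page in it. For reference, the standalone sketch below reproduces that claiming scheme outside HotSpot. It deliberately substitutes std::thread, std::atomic and a malloc'd buffer for WorkGang, Atomic::fetch_and_add and the reserved heap, so all names and sizes in it are illustrative and not part of this change.

// pretouch_sketch.cpp -- illustrative only: std::thread/std::atomic stand in
// for HotSpot's WorkGang and Atomic::fetch_and_add, and a malloc'd buffer
// stands in for the reserved heap.
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <thread>
#include <vector>

// Touch one byte per page so the OS backs the range with real memory,
// which is the effect os::pretouch_memory() has on a claimed chunk.
static void touch_range(char* start, char* end, std::size_t page_size) {
  for (char* p = start; p < end; p += page_size) {
    *(volatile char*)p = 0;
  }
}

int main() {
  const std::size_t page_size   = 4096;                // assumed small-page size
  const std::size_t region_size = 64UL * 1024 * 1024;  // 64 MB demo region
  const unsigned    workers     = std::max(2u, std::thread::hardware_concurrency());

  char* base = static_cast<char*>(std::malloc(region_size));
  if (base == NULL) {
    return 1;
  }

  // Divide the region evenly, align down to a page, never go below one page:
  // the same shape as chunk_size() plus the MAX2 clamp in work().
  std::size_t chunk = (region_size / workers) & ~(page_size - 1);
  chunk = std::max(chunk, page_size);

  std::atomic<std::size_t> cursor(0);   // shared claim cursor, like _cur_addr
  std::vector<std::thread> gang;
  for (unsigned i = 0; i < workers; i++) {
    gang.emplace_back([&]() {
      // Each worker keeps claiming chunks until the region is handed out.
      for (;;) {
        std::size_t claim = cursor.fetch_add(chunk);
        if (claim >= region_size) {
          break;
        }
        std::size_t limit = std::min(claim + chunk, region_size);
        touch_range(base + claim, base + limit, page_size);
      }
    });
  }
  for (std::thread& t : gang) {
    t.join();
  }

  std::printf("pre-touched %zu bytes in %zu-byte chunks with %u workers\n",
              region_size, chunk, workers);
  std::free(base);
  return 0;
}

Claiming chunks from a shared cursor, rather than statically splitting the region per worker, keeps the workers balanced when some of them stall longer on page faults.
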
--- old/src/hotspot/share/gc/parallel/mutableSpace.hpp 2020-08-24 12:25:38.512148160 +0200
+++ new/src/hotspot/share/gc/parallel/mutableSpace.hpp 2020-08-24 12:25:38.144146335 +0200
@@ -56,7 +56,6 @@
   MutableSpaceMangler* mangler() { return _mangler; }
 
   void numa_setup_pages(MemRegion mr, bool clear_space);
-  void pretouch_pages(MemRegion mr);
 
   void set_last_setup_region(MemRegion mr) { _last_setup_region = mr; }
   MemRegion last_setup_region() const { return _last_setup_region; }
@@ -87,7 +86,8 @@
   virtual void initialize(MemRegion mr,
                           bool clear_space,
                           bool mangle_space,
-                          bool setup_pages = SetupPages);
+                          bool setup_pages = SetupPages,
+                          WorkGang *pretouch_gang = NULL);
 
   virtual void clear(bool mangle_space);
   // Does the usual initialization but optionally resets top to bottom.
--- old/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp 2020-08-24 12:25:39.836154725 +0200
+++ new/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp 2020-08-24 12:25:39.468152900 +0200
@@ -88,6 +88,9 @@
   ReservedSpace young_rs = heap_rs.last_part(MaxOldSize);
   assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");
 
+  // Set up the WorkGang first, so it can pre-touch the generations' spaces below.
+  _workers.initialize_workers();
+
   // Create and initialize the generations.
   _young_gen = new PSYoungGen(
       young_rs,
@@ -132,9 +135,6 @@
     return JNI_ENOMEM;
   }
 
-  // Set up WorkGang
-  _workers.initialize_workers();
-
   GCInitLogger::print();
 
   return JNI_OK;
--- old/src/hotspot/share/gc/parallel/psOldGen.cpp 2020-08-24 12:25:40.880159902 +0200
+++ new/src/hotspot/share/gc/parallel/psOldGen.cpp 2020-08-24 12:25:40.508158058 +0200
@@ -131,7 +131,9 @@
   _object_space = new MutableSpace(virtual_space()->alignment());
   object_space()->initialize(cmr,
                              SpaceDecorator::Clear,
-                             SpaceDecorator::Mangle);
+                             SpaceDecorator::Mangle,
+                             MutableSpace::SetupPages,
+                             &(ParallelScavengeHeap::heap()->workers()));
 
   // Update the start_array
   start_array()->set_covered_region(cmr);
--- old/src/hotspot/share/gc/parallel/psYoungGen.cpp 2020-08-24 12:25:41.992165416 +0200
+++ new/src/hotspot/share/gc/parallel/psYoungGen.cpp 2020-08-24 12:25:41.620163572 +0200
@@ -189,9 +189,10 @@
   MemRegion to_mr  ((HeapWord*)to_start, (HeapWord*)from_start);
   MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end);
 
-  eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea);
-  to_space()->initialize(to_mr  , true, ZapUnusedHeapArea);
-  from_space()->initialize(from_mr, true, ZapUnusedHeapArea);
+  WorkGang& pretouch_workers = ParallelScavengeHeap::heap()->workers();
+  eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea, MutableSpace::SetupPages, &pretouch_workers);
+  to_space()->initialize(to_mr  , true, ZapUnusedHeapArea, MutableSpace::SetupPages, &pretouch_workers);
+  from_space()->initialize(from_mr, true, ZapUnusedHeapArea, MutableSpace::SetupPages, &pretouch_workers);
 }
 
 #ifndef PRODUCT
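
For a rough sense of the chunk sizes involved, the small program below plugs representative numbers into the same formula as PGCPretouchTask::chunk_size() and the MAX2 clamp in work(); the region, page size and worker count are made-up examples rather than values taken from this change.

// chunk_size_example.cpp -- illustrative arithmetic only.
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t region  = 1024UL * 1024UL * 1024UL; // e.g. a 1 GB eden
  const std::size_t page    = 2UL * 1024UL * 1024UL;    // e.g. 2 MB large pages
  const std::size_t workers = 8;                        // e.g. 8 GC worker threads

  // align_down(region / workers, page), as in chunk_size() ...
  std::size_t chunk = (region / workers) & ~(page - 1);
  // ... clamped to at least one page, as in work().
  chunk = std::max(chunk, page);

  std::printf("each claim covers %zu MB\n", chunk / (1024 * 1024));  // prints 128 MB here
  return 0;
}

With the gang now created early in parallelScavengeHeap.cpp, the new path is exercised by running the Parallel collector with pre-touching enabled, e.g. -XX:+UseParallelGC -XX:+AlwaysPreTouch; adding -Xlog:gc+heap=debug shows the "Running ParallelGC PreTouch ..." lines introduced above.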