--- old/src/share/vm/gc/g1/g1CollectedHeap.cpp 2016-06-09 15:26:25.711525605 +0300
+++ new/src/share/vm/gc/g1/g1CollectedHeap.cpp 2016-06-09 15:26:25.599525609 +0300
@@ -3474,7 +3474,8 @@
   double remove_self_forwards_start = os::elapsedTime();
 
   remove_self_forwarding_pointers();
-  _preserved_marks_set.restore(workers());
+  SharedRestorePreservedMarksTaskExecutor task_executor(workers());
+  _preserved_marks_set.restore(&task_executor);
 
   g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
 }
--- old/src/share/vm/gc/parallel/psPromotionManager.cpp 2016-06-09 15:26:26.191525587 +0300
+++ new/src/share/vm/gc/parallel/psPromotionManager.cpp 2016-06-09 15:26:26.075525591 +0300
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/parallel/gcTaskManager.hpp"
 #include "gc/parallel/mutableSpace.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/parallel/psOldGen.hpp"
@@ -237,8 +238,53 @@
   _preserved_marks = preserved_marks;
 }
 
+class ParRestoreGCTask : public GCTask {
+private:
+  const uint _id;
+  PreservedMarksSet* const _preserved_marks_set;
+  volatile size_t* const _total_size_addr;
+
+public:
+  virtual char* name() {
+    return (char*) "preserved mark restoration task";
+  }
+
+  virtual void do_it(GCTaskManager* manager, uint which){
+    _preserved_marks_set->get(_id)->restore_and_increment(_total_size_addr);
+  }
+
+  ParRestoreGCTask(uint id,
+                   PreservedMarksSet* preserved_marks_set,
+                   volatile size_t* total_size_addr)
+    : _id(id),
+      _preserved_marks_set(preserved_marks_set),
+      _total_size_addr(total_size_addr) { }
+};
+
+class PSRestorePreservedMarksTaskExecutor : public RestorePreservedMarksTaskExecutor {
+private:
+  GCTaskManager* _gc_task_manager;
+
+public:
+  PSRestorePreservedMarksTaskExecutor(GCTaskManager* gc_task_manager)
+      : _gc_task_manager(gc_task_manager) { }
+
+  void restore(PreservedMarksSet* preserved_marks_set,
+               volatile size_t* total_size_addr) {
+    // GCTask / GCTaskQueue are ResourceObjs
+    ResourceMark rm;
+
+    GCTaskQueue* q = GCTaskQueue::create();
+    for (uint i = 0; i < preserved_marks_set->num(); i += 1) {
+      q->enqueue(new ParRestoreGCTask(i, preserved_marks_set, total_size_addr));
+    }
+    _gc_task_manager->execute_and_wait(q);
+  }
+};
+
 void PSPromotionManager::restore_preserved_marks() {
-  _preserved_marks_set->restore(PSScavenge::gc_task_manager());
+  PSRestorePreservedMarksTaskExecutor task_executor(PSScavenge::gc_task_manager());
+  _preserved_marks_set->restore(&task_executor);
 }
 
 void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
--- old/src/share/vm/gc/serial/defNewGeneration.cpp 2016-06-09 15:26:26.575525573 +0300
+++ new/src/share/vm/gc/serial/defNewGeneration.cpp 2016-06-09 15:26:26.475525577 +0300
@@ -739,7 +739,8 @@
   eden()->object_iterate(&rspc);
   from()->object_iterate(&rspc);
 
-  _preserved_marks_set.restore(GenCollectedHeap::heap()->workers());
+  SharedRestorePreservedMarksTaskExecutor task_executor(GenCollectedHeap::heap()->workers());
+  _preserved_marks_set.restore(&task_executor);
 }
 
 void DefNewGeneration::handle_promotion_failure(oop old) {
--- old/src/share/vm/gc/shared/preservedMarks.cpp 2016-06-09 15:26:27.143525553 +0300
+++ new/src/share/vm/gc/shared/preservedMarks.cpp 2016-06-09 15:26:26.931525560 +0300
@@ -28,9 +28,6 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/parallel/gcTaskManager.hpp"
-#endif
 
 void PreservedMarks::restore() {
   while (!_stack.is_empty()) {
@@ -40,6 +37,15 @@
   assert_empty();
 }
 
+void PreservedMarks::restore_and_increment(volatile size_t* const total_size_addr) {
+  const size_t stack_size = size();
+  restore();
+  // Only do the atomic add if the size is > 0.
+  if (stack_size > 0) {
+    Atomic::add(stack_size, total_size_addr);
+  }
+}
+
 #ifndef PRODUCT
 void PreservedMarks::assert_empty() {
   assert(_stack.is_empty(), "stack expected to be empty, size = "SIZE_FORMAT,
@@ -82,13 +88,7 @@
   virtual void work(uint worker_id) {
     uint task_id = 0;
     while (!_sub_tasks.is_task_claimed(/* reference */ task_id)) {
-      PreservedMarks* const preserved_marks = _preserved_marks_set->get(task_id);
-      const size_t size = preserved_marks->size();
-      preserved_marks->restore();
-      // Only do the atomic add if the size is > 0.
-      if (size > 0) {
-        Atomic::add(size, _total_size_addr);
-      }
+      _preserved_marks_set->get(task_id)->restore_and_increment(_total_size_addr);
     }
     _sub_tasks.all_tasks_completed();
   }
@@ -104,53 +104,6 @@
   }
 };
 
-void PreservedMarksSet::restore_internal(WorkGang* workers,
-                                         volatile size_t* total_size_addr) {
-  assert(workers != NULL, "pre-condition");
-  ParRestoreTask task(workers->active_workers(), this, total_size_addr);
-  workers->run_task(&task);
-}
-
-#if INCLUDE_ALL_GCS
-class ParRestoreGCTask : public GCTask {
-private:
-  const uint _id;
-  PreservedMarksSet* const _preserved_marks_set;
-  volatile size_t* const _total_size_addr;
-
-public:
-  virtual char* name() { return (char*) "preserved mark restoration task"; }
-
-  virtual void do_it(GCTaskManager* manager, uint which) {
-    PreservedMarks* const preserved_marks = _preserved_marks_set->get(_id);
-    const size_t size = preserved_marks->size();
-    preserved_marks->restore();
-    // Only do the atomic add if the size is > 0.
-    if (size > 0) {
-      Atomic::add(size, _total_size_addr);
-    }
-  }
-
-  ParRestoreGCTask(uint id,
-                   PreservedMarksSet* preserved_marks_set,
-                   volatile size_t* total_size_addr)
-    : _id(id),
-      _preserved_marks_set(preserved_marks_set),
-      _total_size_addr(total_size_addr) { }
-};
-
-void PreservedMarksSet::restore_internal(GCTaskManager* gc_task_manager,
-                                         volatile size_t* total_size_addr) {
-  // GCTask / GCTaskQueue are ResourceObjs
-  ResourceMark rm;
-
-  GCTaskQueue* q = GCTaskQueue::create();
-  for (uint i = 0; i < num(); i += 1) {
-    q->enqueue(new ParRestoreGCTask(i, this, total_size_addr));
-  }
-  gc_task_manager->execute_and_wait(q);
-}
-#endif
 
 void PreservedMarksSet::reclaim() {
   assert_empty();
@@ -176,3 +129,16 @@
   }
 }
 #endif // ndef PRODUCT
+
+void SharedRestorePreservedMarksTaskExecutor::restore(PreservedMarksSet* preserved_marks_set,
+                                                      volatile size_t* total_size_addr) {
+  if (_workers == NULL) {
+    for (uint i = 0; i < preserved_marks_set->num(); i += 1) {
+      *total_size_addr += preserved_marks_set->get(i)->size();
+      preserved_marks_set->get(i)->restore();
+    }
+  } else {
+    ParRestoreTask task(_workers->active_workers(), preserved_marks_set, total_size_addr);
+    _workers->run_task(&task);
+  }
+}
--- old/src/share/vm/gc/shared/preservedMarks.hpp 2016-06-09 15:26:27.983525522 +0300
+++ new/src/share/vm/gc/shared/preservedMarks.hpp 2016-06-09 15:26:27.827525528 +0300
@@ -30,7 +30,7 @@
 #include "oops/oop.hpp"
 #include "utilities/stack.hpp"
 
-class GCTaskManager;
+class PreservedMarksSet;
 class WorkGang;
 
 class PreservedMarks VALUE_OBJ_CLASS_SPEC {
@@ -61,6 +61,7 @@
   // reclaim the memory taken up by the stack segments.
   void restore();
+  void restore_and_increment(volatile size_t* const _total_size_addr);
   inline static void init_forwarded_mark(oop obj);
 
   // Assert the stack is empty and has no cached segments.
@@ -75,6 +76,24 @@
   virtual void do_object(oop obj);
 };
 
+class RestorePreservedMarksTaskExecutor {
+public:
+  void virtual restore(PreservedMarksSet* preserved_marks_set,
+                       volatile size_t* total_size_addr) = 0;
+};
+
+class SharedRestorePreservedMarksTaskExecutor : public RestorePreservedMarksTaskExecutor {
+private:
+  WorkGang* _workers;
+
+public:
+  SharedRestorePreservedMarksTaskExecutor(WorkGang* workers) : _workers(workers) { }
+
+  void restore(PreservedMarksSet* preserved_marks_set,
+               volatile size_t* total_size_addr);
+
+};
+
 class PreservedMarksSet : public CHeapObj<mtGC> {
 private:
   // true -> _stacks will be allocated in the C heap
@@ -91,13 +110,6 @@
   // or == NULL if they have not.
   Padded<PreservedMarks>* _stacks;
 
-  // Internal version of restore() that uses a WorkGang for parallelism.
-  void restore_internal(WorkGang* workers, volatile size_t* total_size_addr);
-
-  // Internal version of restore() that uses a GCTaskManager for parallelism.
-  void restore_internal(GCTaskManager* gc_task_manager,
-                        volatile size_t* total_size_addr);
-
 public:
   uint num() const { return _num; }
 
@@ -111,14 +123,11 @@
   // Allocate stack array.
   void init(uint num);
 
-  // Itrerate over all stacks, restore all presered marks, and reclaim
-  // the memory taken up by the stack segments. If the executor is
-  // NULL, restoration will be done serially. If the executor is not
-  // NULL, restoration could be done in parallel (when it makes
-  // sense). Supported executors: WorkGang (Serial, CMS, G1),
-  // GCTaskManager (PS).
-  template <class E>
-  inline void restore(E* executor);
+  // Iterate over all stacks, restore all preserved marks, and reclaim
+  // the memory taken up by the stack segments.
+  // Supported executors: SharedRestorePreservedMarksTaskExecutor (Serial, CMS, G1),
+  // PSRestorePreservedMarksTaskExecutor (PS).
+  inline void restore(RestorePreservedMarksTaskExecutor* executor);
 
   // Reclaim stack array.
   void reclaim();
--- old/src/share/vm/gc/shared/preservedMarks.inline.hpp 2016-06-09 15:26:28.631525499 +0300
+++ new/src/share/vm/gc/shared/preservedMarks.inline.hpp 2016-06-09 15:26:28.419525506 +0300
@@ -49,8 +49,7 @@
   obj->init_mark();
 }
 
-template <class E>
-inline void PreservedMarksSet::restore(E* executor) {
+inline void PreservedMarksSet::restore(RestorePreservedMarksTaskExecutor* executor) {
   volatile size_t total_size = 0;
 
 #ifdef ASSERT
@@ -61,17 +60,7 @@
   }
 #endif // def ASSERT
 
-  if (executor == NULL) {
-    for (uint i = 0; i < _num; i += 1) {
-      total_size += get(i)->size();
-      get(i)->restore();
-    }
-  } else {
-    // Right now, if the executor is not NULL we do the work in
-    // parallel. In the future we might want to do the restoration
-    // serially, if there's only a small number of marks per stack.
-    restore_internal(executor, &total_size);
-  }
+  executor->restore(this, &total_size);
 
   assert_empty();
   assert(total_size == total_size_before,