--- old/src/hotspot/share/gc/epsilon/epsilonHeap.cpp 2020-07-29 15:26:38.631931048 +0800 +++ new/src/hotspot/share/gc/epsilon/epsilonHeap.cpp 2020-07-29 15:26:38.339931038 +0800 @@ -277,6 +277,11 @@ _space->object_iterate(cl); } +// No workGang for EpsilonHeap, work serially with thread 0 +void EpsilonHeap::run_task(AbstractGangTask* task) { + task->work(0); +} + void EpsilonHeap::print_on(outputStream *st) const { st->print_cr("Epsilon Heap"); --- old/src/hotspot/share/gc/epsilon/epsilonHeap.hpp 2020-07-29 15:26:39.331931073 +0800 +++ new/src/hotspot/share/gc/epsilon/epsilonHeap.hpp 2020-07-29 15:26:39.047931063 +0800 @@ -141,6 +141,9 @@ virtual void print_tracing_info() const; virtual bool print_location(outputStream* st, void* addr) const; + // Runs the given AbstractGangTask with the current active workers. + virtual void run_task(AbstractGangTask* task); + private: void print_heap_info(size_t used) const; void print_metaspace_info() const; --- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-07-29 15:26:40.027931098 +0800 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-07-29 15:26:39.735931088 +0800 @@ -89,6 +89,7 @@ #include "logging/log.hpp" #include "memory/allocation.hpp" #include "memory/iterator.hpp" +#include "memory/heapInspection.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/access.inline.hpp" @@ -161,9 +162,13 @@ reset_from_card_cache(start_idx, num_regions); } -Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) { - Ticks start = Ticks::now(); +void G1CollectedHeap::run_task(AbstractGangTask* task) { workers()->run_task(task, workers()->active_workers()); +} + +Tickspan G1CollectedHeap::run_task_timed(AbstractGangTask* task) { + Ticks start = Ticks::now(); + run_task(task); return Ticks::now() - start; } @@ -3700,7 +3705,7 @@ { G1PrepareEvacuationTask g1_prep_task(this); - Tickspan task_time = run_task(&g1_prep_task); + Tickspan task_time = run_task_timed(&g1_prep_task); 
phase_times()->record_register_regions(task_time.seconds() * 1000.0, g1_prep_task.humongous_total(), @@ -3850,7 +3855,7 @@ { G1RootProcessor root_processor(this, num_workers); G1EvacuateRegionsTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, num_workers); - task_time = run_task(&g1_par_task); + task_time = run_task_timed(&g1_par_task); // Closing the inner scope will execute the destructor for the G1RootProcessor object. // To extract its code root fixup time we measure total time of this scope and // subtract from the time the WorkGang task took. @@ -3889,7 +3894,7 @@ { G1MarkScope code_mark_scope; G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers()); - task_time = run_task(&task); + task_time = run_task_timed(&task); // See comment in evacuate_collection_set() for the reason of the scope. } Tickspan total_processing = Ticks::now() - start_processing; @@ -4916,3 +4921,27 @@ GrowableArray G1CollectedHeap::memory_pools() { return _g1mm->memory_pools(); } + +class G1ParallelObjectIterator : public ParallelObjectIterator { +private: + G1CollectedHeap* _heap; + HeapRegionClaimer _claimer; + +public: + G1ParallelObjectIterator(uint thread_num) : + _heap(G1CollectedHeap::heap()), + _claimer(thread_num == 0 ? 
G1CollectedHeap::heap()->workers()->active_workers() : thread_num) {} + + virtual void object_iterate(ObjectClosure* cl, uint worker_id) { + _heap->object_iterate_parallel(cl, worker_id, &_claimer); + } +}; + +ParallelObjectIterator* G1CollectedHeap::parallel_object_iterator(uint thread_num) { + return new G1ParallelObjectIterator(thread_num); +} + +void G1CollectedHeap::object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer) { + IterateObjectClosureRegionClosure blk(cl); + heap_region_par_iterate_from_worker_offset(&blk, claimer, worker_id); +} --- old/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2020-07-29 15:26:40.739931124 +0800 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2020-07-29 15:26:40.447931113 +0800 @@ -549,9 +549,12 @@ WorkGang* workers() const { return _workers; } - // Runs the given AbstractGangTask with the current active workers, returning the - // total time taken. - Tickspan run_task(AbstractGangTask* task); + // Runs the given AbstractGangTask with the current active workers. + virtual void run_task(AbstractGangTask* task); + + // Runs the given AbstractGangTask with the current active workers, + // returning the total time taken. + Tickspan run_task_timed(AbstractGangTask* task); G1Allocator* allocator() { return _allocator; @@ -1168,9 +1171,13 @@ // Iteration functions. + void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer); + // Iterate over all objects, calling "cl.do_object" on each. virtual void object_iterate(ObjectClosure* cl); + virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num); + // Keep alive an object that was loaded with AS_NO_KEEPALIVE. 
virtual void keep_alive(oop obj); --- old/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp 2020-07-29 15:26:41.427931148 +0800 +++ new/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp 2020-07-29 15:26:41.139931138 +0800 @@ -539,6 +539,9 @@ old_gen()->object_iterate(cl); } +void ParallelScavengeHeap::run_task(AbstractGangTask* task) { + _workers.run_task(task); +} HeapWord* ParallelScavengeHeap::block_start(const void* addr) const { if (young_gen()->is_in_reserved(addr)) { --- old/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp 2020-07-29 15:26:42.095931172 +0800 +++ new/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp 2020-07-29 15:26:41.803931162 +0800 @@ -259,6 +259,9 @@ WorkGang& workers() { return _workers; } + + // Runs the given AbstractGangTask with the current active workers. + virtual void run_task(AbstractGangTask* task); }; // Class that can be used to print information about the --- old/src/hotspot/share/gc/serial/serialHeap.cpp 2020-07-29 15:26:42.771931196 +0800 +++ new/src/hotspot/share/gc/serial/serialHeap.cpp 2020-07-29 15:26:42.479931186 +0800 @@ -87,3 +87,8 @@ memory_pools.append(_old_pool); return memory_pools; } + +// No workGang for SerialHeap, work serially with thread 0. +void SerialHeap::run_task(AbstractGangTask* task) { + task->work(0); +} --- old/src/hotspot/share/gc/serial/serialHeap.hpp 2020-07-29 15:26:43.435931220 +0800 +++ new/src/hotspot/share/gc/serial/serialHeap.hpp 2020-07-29 15:26:43.147931210 +0800 @@ -75,6 +75,9 @@ template void oop_since_save_marks_iterate(OopClosureType1* cur, OopClosureType2* older); + + // Runs the given AbstractGangTask with the current active workers. 
+ virtual void run_task(AbstractGangTask* task); }; #endif // SHARE_GC_SERIAL_SERIALHEAP_HPP --- old/src/hotspot/share/gc/shared/collectedHeap.hpp 2020-07-29 15:26:44.123931244 +0800 +++ new/src/hotspot/share/gc/shared/collectedHeap.hpp 2020-07-29 15:26:43.835931234 +0800 @@ -30,6 +30,7 @@ #include "gc/shared/verifyOption.hpp" #include "memory/allocation.hpp" #include "memory/universe.hpp" +#include "memory/heapInspection.hpp" #include "runtime/handles.hpp" #include "runtime/perfData.hpp" #include "runtime/safepoint.hpp" @@ -44,6 +45,7 @@ // class defines the functions that a heap must implement, and contains // infrastructure common to all heaps. +class AbstractGangTask; class AdaptiveSizePolicy; class BarrierSet; class GCHeapSummary; @@ -85,6 +87,11 @@ } }; +class ParallelObjectIterator : public CHeapObj { +public: + virtual void object_iterate(ObjectClosure* cl, uint worker_id) = 0; +}; + // // CollectedHeap // GenCollectedHeap @@ -401,6 +408,13 @@ // Iterate over all objects, calling "cl.do_object" on each. virtual void object_iterate(ObjectClosure* cl) = 0; + virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num) { + return NULL; + } + + // Run given task. Possibly in parallel if the GC supports it. + virtual void run_task(AbstractGangTask* task) = 0; + // Keep alive an object that was loaded with AS_NO_KEEPALIVE. 
virtual void keep_alive(oop obj) {} --- old/src/hotspot/share/gc/shared/gcVMOperations.cpp 2020-07-29 15:26:44.795931268 +0800 +++ new/src/hotspot/share/gc/shared/gcVMOperations.cpp 2020-07-29 15:26:44.503931258 +0800 @@ -150,7 +150,7 @@ } } HeapInspection inspect; - inspect.heap_inspection(_out); + inspect.heap_inspection(_out, _parallel_thread_num); } --- old/src/hotspot/share/gc/shared/gcVMOperations.hpp 2020-07-29 15:26:45.475931293 +0800 +++ new/src/hotspot/share/gc/shared/gcVMOperations.hpp 2020-07-29 15:26:45.187931282 +0800 @@ -125,12 +125,15 @@ private: outputStream* _out; bool _full_gc; + uint _parallel_thread_num; public: - VM_GC_HeapInspection(outputStream* out, bool request_full_gc) : - VM_GC_Operation(0 /* total collections, dummy, ignored */, - GCCause::_heap_inspection /* GC Cause */, - 0 /* total full collections, dummy, ignored */, - request_full_gc), _out(out), _full_gc(request_full_gc) {} + VM_GC_HeapInspection(outputStream* out, bool request_full_gc, + uint parallel_thread_num = 1) : + VM_GC_Operation(0 /* total collections, dummy, ignored */, + GCCause::_heap_inspection /* GC Cause */, + 0 /* total full collections, dummy, ignored */, + request_full_gc), _out(out), _full_gc(request_full_gc), + _parallel_thread_num(parallel_thread_num) {} ~VM_GC_HeapInspection() {} virtual VMOp_Type type() const { return VMOp_GC_HeapInspection; } --- old/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp 2020-07-29 15:26:46.143931317 +0800 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp 2020-07-29 15:26:45.855931306 +0800 @@ -1343,6 +1343,10 @@ } } +void ShenandoahHeap::run_task(AbstractGangTask* task) { + workers()->run_task(task, workers()->active_workers()); +} + // Keep alive an object that was loaded with AS_NO_KEEPALIVE. 
void ShenandoahHeap::keep_alive(oop obj) { if (is_concurrent_mark_in_progress()) { --- old/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp 2020-07-29 15:26:46.819931341 +0800 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp 2020-07-29 15:26:46.535931330 +0800 @@ -617,6 +617,10 @@ void tlabs_retire(bool resize); void gclabs_retire(bool resize); + // Runs the given AbstractGangTask with the current active workers. + // Does not return timing information. + virtual void run_task(AbstractGangTask* task); + // ---------- Marking support // private: --- old/src/hotspot/share/gc/z/zCollectedHeap.cpp 2020-07-29 15:26:47.503931365 +0800 +++ new/src/hotspot/share/gc/z/zCollectedHeap.cpp 2020-07-29 15:26:47.215931355 +0800 @@ -253,6 +253,10 @@ _heap.object_iterate(cl, true /* visit_weaks */); } +void ZCollectedHeap::run_task(AbstractGangTask* task) { + return _heap.run_task(task); +} + void ZCollectedHeap::keep_alive(oop obj) { _heap.keep_alive(obj); } --- old/src/hotspot/share/gc/z/zCollectedHeap.hpp 2020-07-29 15:26:48.187931389 +0800 +++ new/src/hotspot/share/gc/z/zCollectedHeap.hpp 2020-07-29 15:26:47.903931379 +0800 @@ -98,6 +98,8 @@ virtual void object_iterate(ObjectClosure* cl); + virtual void run_task(AbstractGangTask* task); + virtual void keep_alive(oop obj); virtual void register_nmethod(nmethod* nm); --- old/src/hotspot/share/gc/z/zHeap.cpp 2020-07-29 15:26:48.871931414 +0800 +++ new/src/hotspot/share/gc/z/zHeap.cpp 2020-07-29 15:26:48.579931403 +0800 @@ -35,6 +35,7 @@ #include "gc/z/zRelocationSetSelector.inline.hpp" #include "gc/z/zResurrection.hpp" #include "gc/z/zStat.hpp" +#include "gc/z/zTask.hpp" #include "gc/z/zThread.inline.hpp" #include "gc/z/zVerify.hpp" #include "gc/z/zWorkers.inline.hpp" @@ -185,6 +186,26 @@ _workers.threads_do(tc); } +// Adapter class from AbstractGangTask to ZTask +class ZAbstractGangTaskAdapter : public ZTask { +private: + AbstractGangTask* _task; + +public: + ZAbstractGangTaskAdapter(AbstractGangTask* task) : + 
ZTask(task->name()), + _task(task) { } + + virtual void work() { + _task->work(ZThread::worker_id()); + } +}; + +void ZHeap::run_task(AbstractGangTask* task) { + ZAbstractGangTaskAdapter ztask(task); + _workers.run_parallel(&ztask); +} + void ZHeap::out_of_memory() { ResourceMark rm; --- old/src/hotspot/share/gc/z/zHeap.hpp 2020-07-29 15:26:49.535931438 +0800 +++ new/src/hotspot/share/gc/z/zHeap.hpp 2020-07-29 15:26:49.247931427 +0800 @@ -100,6 +100,7 @@ void set_boost_worker_threads(bool boost); void threads_do(ThreadClosure* tc) const; + void run_task(AbstractGangTask* task); // Reference processing ReferenceDiscoverer* reference_discoverer(); void set_soft_reference_policy(bool clear); --- old/src/hotspot/share/memory/heapInspection.cpp 2020-07-29 15:26:50.223931462 +0800 +++ new/src/hotspot/share/memory/heapInspection.cpp 2020-07-29 15:26:49.935931452 +0800 @@ -35,6 +35,7 @@ #include "memory/universe.hpp" #include "oops/oop.inline.hpp" #include "oops/reflectionAccessorImplKlassHelper.hpp" +#include "runtime/atomic.hpp" #include "runtime/os.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" @@ -237,6 +238,41 @@ return _size_of_instances_in_words; } +// Return false if the entry could not be recorded on account +// of running out of space required to create a new entry. +bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) { + Klass* k = cie->klass(); + KlassInfoEntry* elt = lookup(k); + // elt may be NULL if it's a new klass for which we + // could not allocate space for a new entry in the hashtable. 
+ if (elt != NULL) { + elt->set_count(elt->count() + cie->count()); + elt->set_words(elt->words() + cie->words()); + _size_of_instances_in_words += cie->words(); + return true; + } + return false; +} + +class KlassInfoTableMergeClosure : public KlassInfoClosure { +private: + KlassInfoTable* _dest; + bool _success; +public: + KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {} + void do_cinfo(KlassInfoEntry* cie) { + _success &= _dest->merge_entry(cie); + } + bool success() { return _success; } +}; + +// merge from table +bool KlassInfoTable::merge(KlassInfoTable* table) { + KlassInfoTableMergeClosure closure(this); + table->iterate(&closure); + return closure.success(); +} + int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) { return (*e1)->compare(*e1,*e2); } @@ -482,7 +518,7 @@ class RecordInstanceClosure : public ObjectClosure { private: KlassInfoTable* _cit; - size_t _missed_count; + uint _missed_count; BoolObjectClosure* _filter; public: RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) : @@ -496,7 +532,7 @@ } } - size_t missed_count() { return _missed_count; } + uint missed_count() { return _missed_count; } private: bool should_visit(oop obj) { @@ -504,25 +540,70 @@ } }; -size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) { - ResourceMark rm; +// Heap inspection for every worker. +// When native OOM happens for KlassInfoTable, set _success to false. +void ParHeapInspectTask::work(uint worker_id) { + uint missed_count = 0; + bool merge_success = true; + if (!Atomic::load(&_success)) { + // other worker has failed on parallel iteration. 
+ return; + } + + KlassInfoTable cit(false); + if (cit.allocation_failed()) { + // fail to allocate memory, stop parallel mode + Atomic::store(&_success, false); + return; + } + RecordInstanceClosure ric(&cit, _filter); + _poi->object_iterate(&ric, worker_id); + missed_count = ric.missed_count(); + { + MutexLocker x(&_mutex); + merge_success = _shared_cit->merge(&cit); + } + if (merge_success) { + Atomic::add(&_missed_count, missed_count); + } else { + Atomic::store(&_success, false); + } +} +uint HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, uint parallel_thread_num) { + + // Try parallel first. + if (parallel_thread_num > 1) { + ResourceMark rm; + ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num); + if (poi != NULL) { + ParHeapInspectTask task(poi, cit, filter); + Universe::heap()->run_task(&task); + delete poi; + if (task.success()) { + return task.missed_count(); + } + } + } + + ResourceMark rm; + // If no parallel iteration available, run serially. 
RecordInstanceClosure ric(cit, filter); Universe::heap()->object_iterate(&ric); return ric.missed_count(); } -void HeapInspection::heap_inspection(outputStream* st) { +void HeapInspection::heap_inspection(outputStream* st, uint parallel_thread_num) { ResourceMark rm; KlassInfoTable cit(false); if (!cit.allocation_failed()) { // populate table with object allocation info - size_t missed_count = populate_table(&cit); + uint missed_count = populate_table(&cit, NULL, parallel_thread_num); if (missed_count != 0) { - log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT + log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " UINTX_FORMAT " total instances in data below", - missed_count); + (uintx)missed_count); } // Sort and print klass instance info --- old/src/hotspot/share/memory/heapInspection.hpp 2020-07-29 15:26:50.911931487 +0800 +++ new/src/hotspot/share/memory/heapInspection.hpp 2020-07-29 15:26:50.623931476 +0800 @@ -30,6 +30,9 @@ #include "oops/oop.hpp" #include "oops/annotations.hpp" #include "utilities/macros.hpp" +#include "gc/shared/workgroup.hpp" + +class ParallelObjectIterator; #if INCLUDE_SERVICES @@ -122,6 +125,8 @@ void iterate(KlassInfoClosure* cic); bool allocation_failed() { return _buckets == NULL; } size_t size_of_instances_in_words() const; + bool merge(KlassInfoTable* table); + bool merge_entry(const KlassInfoEntry* cie); friend class KlassInfoHisto; friend class KlassHierarchy; @@ -211,11 +216,48 @@ class HeapInspection : public StackObj { public: - void heap_inspection(outputStream* st) NOT_SERVICES_RETURN; - size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL) NOT_SERVICES_RETURN_(0); + void heap_inspection(outputStream* st, uint parallel_thread_num = 1) NOT_SERVICES_RETURN; + uint populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL, uint parallel_thread_num = 1) NOT_SERVICES_RETURN_(0); static void find_instances_at_safepoint(Klass* k, GrowableArray* result) 
NOT_SERVICES_RETURN; private: void iterate_over_heap(KlassInfoTable* cit, BoolObjectClosure* filter = NULL); }; +// Parallel heap inspection task. Parallel inspection can fail due to +// a native OOM when allocating memory for TL-KlassInfoTable. +// _success will be set false on an OOM, and serial inspection tried. +class ParHeapInspectTask : public AbstractGangTask { + private: + ParallelObjectIterator* _poi; + KlassInfoTable* _shared_cit; + BoolObjectClosure* _filter; + uint _missed_count; + bool _success; + Mutex _mutex; + + public: + ParHeapInspectTask(ParallelObjectIterator* poi, + KlassInfoTable* shared_cit, + BoolObjectClosure* filter) : + AbstractGangTask("Iterating heap"), + _poi(poi), + _shared_cit(shared_cit), + _filter(filter), + _missed_count(0), + _success(true), + _mutex(Mutex::leaf, "Parallel heap iteration data merge lock") {} + + uint missed_count() const { + return _missed_count; + } + + bool success() { + return _success; + } + + virtual void work(uint worker_id); +}; + + + #endif // SHARE_MEMORY_HEAPINSPECTION_HPP --- old/src/hotspot/share/runtime/arguments.hpp 2020-07-29 15:26:51.591931511 +0800 +++ new/src/hotspot/share/runtime/arguments.hpp 2020-07-29 15:26:51.299931500 +0800 @@ -449,12 +449,6 @@ static ArgsRange check_memory_size(julong size, julong min_size, julong max_size); static ArgsRange parse_memory_size(const char* s, julong* long_arg, julong min_size, julong max_size = max_uintx); - // Parse a string for a unsigned integer. Returns true if value - // is an unsigned integer greater than or equal to the minimum - // parameter passed and returns the value in uintx_arg. Returns - // false otherwise, with uintx_arg undefined. 
- static bool parse_uintx(const char* value, uintx* uintx_arg, - uintx min_size); // methods to build strings from individual args static void build_jvm_args(const char* arg); @@ -498,6 +492,12 @@ public: // Parses the arguments, first phase static jint parse(const JavaVMInitArgs* args); + // Parse a string for a unsigned integer. Returns true if value + // is an unsigned integer greater than or equal to the minimum + // parameter passed and returns the value in uintx_arg. Returns + // false otherwise, with uintx_arg undefined. + static bool parse_uintx(const char* value, uintx* uintx_arg, + uintx min_size); // Apply ergonomics static jint apply_ergo(); // Adjusts the arguments after the OS have adjusted the arguments --- old/src/hotspot/share/services/attachListener.cpp 2020-07-29 15:26:52.263931535 +0800 +++ new/src/hotspot/share/services/attachListener.cpp 2020-07-29 15:26:51.971931524 +0800 @@ -248,11 +248,13 @@ // Input arguments :- // arg0: "-live" or "-all" // arg1: Name of the dump file or NULL +// arg2: parallel thread number static jint heap_inspection(AttachOperation* op, outputStream* out) { bool live_objects_only = true; // default is true to retain the behavior before this change is made outputStream* os = out; // if path not specified or path is NULL, use out fileStream* fs = NULL; const char* arg0 = op->arg(0); + uint parallel_thread_num = MAX2(1, (uint)os::initial_active_processor_count() * 3 / 8); if (arg0 != NULL && (strlen(arg0) > 0)) { if (strcmp(arg0, "-all") != 0 && strcmp(arg0, "-live") != 0) { out->print_cr("Invalid argument to inspectheap operation: %s", arg0); @@ -262,21 +264,26 @@ } const char* path = op->arg(1); - if (path != NULL) { - if (path[0] == '\0') { - out->print_cr("No dump file specified"); - } else { - // create file - fs = new (ResourceObj::C_HEAP, mtInternal) fileStream(path); - if (fs == NULL) { - out->print_cr("Failed to allocate space for file: %s", path); - return JNI_ERR; - } - os = fs; + if (path != NULL && path[0] != 
'\0') { + // create file + fs = new (ResourceObj::C_HEAP, mtInternal) fileStream(path); + if (fs == NULL) { + out->print_cr("Failed to allocate space for file: %s", path); } + os = fs; } - VM_GC_HeapInspection heapop(os, live_objects_only /* request full gc */); + const char* num_str = op->arg(2); + if (num_str != NULL && num_str[0] != '\0') { + uintx num; + if (!Arguments::parse_uintx(num_str, &num, 0)) { + out->print_cr("Invalid parallel thread number: [%s]", num_str); + return JNI_ERR; + } + parallel_thread_num = num == 0 ? parallel_thread_num : (uint)num; + } + + VM_GC_HeapInspection heapop(os, live_objects_only /* request full gc */, parallel_thread_num); VMThread::execute(&heapop); if (os != NULL && os != out) { out->print_cr("Heap inspection file created: %s", path); --- old/src/jdk.jcmd/share/classes/sun/tools/jmap/JMap.java 2020-07-29 15:26:52.939931559 +0800 +++ new/src/jdk.jcmd/share/classes/sun/tools/jmap/JMap.java 2020-07-29 15:26:52.651931549 +0800 @@ -169,28 +169,41 @@ UnsupportedEncodingException { String liveopt = "-all"; String filename = null; + String parallel = null; String subopts[] = options.split(","); + boolean set_all = false; + boolean set_live = false; for (int i = 0; i < subopts.length; i++) { String subopt = subopts[i]; if (subopt.equals("") || subopt.equals("all")) { - // pass + set_all = true; + liveopt = "-all"; } else if (subopt.equals("live")) { + // Add '-' for compatibility. 
+ set_live = true; liveopt = "-live"; } else if (subopt.startsWith("file=")) { filename = parseFileName(subopt); if (filename == null) { usage(1); // invalid options or no filename } + } else if (subopt.startsWith("parallel=")) { + parallel = subopt.substring("parallel=".length()); + if (parallel == null) { + usage(1); + } } else { usage(1); } } + if (set_live && set_all) { + usage(1); + } System.out.flush(); - // inspectHeap is not the same as jcmd GC.class_histogram - executeCommandForPid(pid, "inspectheap", liveopt, filename); + executeCommandForPid(pid, "inspectheap", liveopt, filename, parallel); } private static void dump(String pid, String options) @@ -287,6 +300,10 @@ System.err.println(" live count only live objects"); System.err.println(" all count all objects in the heap (default if one of \"live\" or \"all\" is not specified)"); System.err.println(" file= dump data to "); + System.err.println(" parallel= parallel threads number for heap iteration:"); + System.err.println(" parallel=0 default behavior, use predefined number of threads"); + System.err.println(" parallel=1 disable parallel heap iteration"); + System.err.println(" parallel= use N threads for parallel heap iteration"); System.err.println(""); System.err.println(" Example: jmap -histo:live,file=/tmp/histo.data "); System.exit(exit); --- old/test/jdk/sun/tools/jmap/BasicJMapTest.java 2020-07-29 15:26:53.611931583 +0800 +++ new/test/jdk/sun/tools/jmap/BasicJMapTest.java 2020-07-29 15:26:53.327931573 +0800 @@ -79,40 +79,68 @@ output.shouldHaveExitValue(0); } + private static void testHistoParallelZero() throws Exception { + OutputAnalyzer output = jmap("-histo:parallel=0"); + output.shouldHaveExitValue(0); + } + + private static void testHistoParallel() throws Exception { + OutputAnalyzer output = jmap("-histo:parallel=2"); + output.shouldHaveExitValue(0); + } + + private static void testHistoNonParallel() throws Exception { + OutputAnalyzer output = jmap("-histo:parallel=1"); + 
output.shouldHaveExitValue(0); + } + private static void testHistoToFile() throws Exception { - histoToFile(false); + histoToFile(false, false, 1); } private static void testHistoLiveToFile() throws Exception { - histoToFile(true); + histoToFile(true, false, 1); } private static void testHistoAllToFile() throws Exception { - boolean explicitAll = true; - histoToFile(false, explicitAll); + histoToFile(false, true, 1); } - private static void histoToFile(boolean live) throws Exception { - boolean explicitAll = false; - histoToFile(live, explicitAll); + private static void testHistoFileParallelZero() throws Exception { + histoToFile(false, false, 0); } - private static void histoToFile(boolean live, boolean explicitAll) throws Exception { - if (live == true && explicitAll == true) { + private static void testHistoFileParallel() throws Exception { + histoToFile(false, false, 2); + } + + private static void histoToFile(boolean live, + boolean explicitAll, + int parallelThreadNum) throws Exception { + String liveArg = ""; + String fileArg = ""; + String parArg = "parallel=" + parallelThreadNum; + String allArgs = "-histo:"; + + if (live && explicitAll) { fail("Illegal argument setting for jmap -histo"); } + if (live) { + liveArg = "live,"; + } + if (explicitAll) { + liveArg = "all,"; + } + File file = new File("jmap.histo.file" + System.currentTimeMillis() + ".histo"); if (file.exists()) { file.delete(); } + fileArg = "file=" + file.getName(); + OutputAnalyzer output; - if (live) { - output = jmap("-histo:live,file=" + file.getName()); - } else if (explicitAll == true) { - output = jmap("-histo:all,file=" + file.getName()); - } else { - output = jmap("-histo:file=" + file.getName()); - } + allArgs = allArgs + liveArg + fileArg + ',' + parArg; + output = jmap(allArgs); output.shouldHaveExitValue(0); output.shouldContain("Heap inspection file created"); file.delete(); @@ -129,43 +157,45 @@ } private static void testDump() throws Exception { - dump(false); + dump(false, 
false); } private static void testDumpLive() throws Exception { - dump(true); + dump(true, false); } private static void testDumpAll() throws Exception { - boolean explicitAll = true; - dump(false, explicitAll); - } - - private static void dump(boolean live) throws Exception { - boolean explicitAll = false; - dump(live, explicitAll); + dump(false, true); } private static void dump(boolean live, boolean explicitAll) throws Exception { - if (live == true && explicitAll == true) { - fail("Illegal argument setting for jmap -dump"); - } - File dump = new File("jmap.dump." + System.currentTimeMillis() + ".hprof"); - if (dump.exists()) { - dump.delete(); + String liveArg = ""; + String fileArg = ""; + String allArgs = "-dump:"; + + if (live && explicitAll) { + fail("Illegal argument setting for jmap -dump"); } - OutputAnalyzer output; if (live) { - output = jmap("-dump:live,format=b,file=" + dump.getName()); - } else if (explicitAll == true) { - output = jmap("-dump:all,format=b,file=" + dump.getName()); - } else { - output = jmap("-dump:format=b,file=" + dump.getName()); + liveArg = "live,"; + } + if (explicitAll) { + liveArg = "all,"; } + + File file = new File("jmap.dump" + System.currentTimeMillis() + ".hprof"); + if (file.exists()) { + file.delete(); + } + fileArg = "file=" + file.getName(); + + OutputAnalyzer output; + allArgs = allArgs + liveArg + "format=b," + fileArg; + output = jmap(allArgs); output.shouldHaveExitValue(0); output.shouldContain("Heap dump file created"); - verifyDumpFile(dump); - dump.delete(); + verifyDumpFile(file); + file.delete(); } private static void verifyDumpFile(File dump) { @@ -195,5 +225,4 @@ return output; } - }