--- old/src/hotspot/share/gc/epsilon/epsilonHeap.hpp	2020-08-11 22:49:16.303976305 +0800
+++ new/src/hotspot/share/gc/epsilon/epsilonHeap.hpp	2020-08-11 22:49:16.139976830 +0800
@@ -119,6 +119,10 @@
   // No GC threads
   virtual void gc_threads_do(ThreadClosure* tc) const {}
 
+  // Runs the given AbstractGangTask with the current active workers
+  // No workGang for EpsilonHeap, work serially with thread 0
+  virtual void run_task(AbstractGangTask* task) { task->work(0); }
+
   // No nmethod handling
   virtual void register_nmethod(nmethod* nm) {}
   virtual void unregister_nmethod(nmethod* nm) {}
--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	2020-08-11 22:49:17.011974037 +0800
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	2020-08-11 22:49:16.843974576 +0800
@@ -89,6 +89,7 @@
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
+#include "memory/heapInspection.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/access.inline.hpp"
@@ -161,9 +162,13 @@
   reset_from_card_cache(start_idx, num_regions);
 }
 
-Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) {
-  Ticks start = Ticks::now();
+void G1CollectedHeap::run_task(AbstractGangTask* task) {
   workers()->run_task(task, workers()->active_workers());
+}
+
+Tickspan G1CollectedHeap::run_task_timed(AbstractGangTask* task) {
+  Ticks start = Ticks::now();
+  run_task(task);
   return Ticks::now() - start;
 }
 
@@ -2301,6 +2306,30 @@
   heap_region_iterate(&blk);
 }
 
+class G1ParallelObjectIterator : public ParallelObjectIterator {
+private:
+  G1CollectedHeap*  _heap;
+  HeapRegionClaimer _claimer;
+
+public:
+  G1ParallelObjectIterator(uint thread_num) :
+      _heap(G1CollectedHeap::heap()),
+      _claimer(thread_num == 0 ? G1CollectedHeap::heap()->workers()->active_workers() : thread_num) {}
+
+  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
+    _heap->object_iterate_parallel(cl, worker_id, &_claimer);
+  }
+};
+
+ParallelObjectIterator* G1CollectedHeap::parallel_object_iterator(uint thread_num) {
+  return new G1ParallelObjectIterator(thread_num);
+}
+
+void G1CollectedHeap::object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer) {
+  IterateObjectClosureRegionClosure blk(cl);
+  heap_region_par_iterate_from_worker_offset(&blk, claimer, worker_id);
+}
+
 void G1CollectedHeap::keep_alive(oop obj) {
   G1BarrierSet::enqueue(obj);
 }
@@ -3694,7 +3723,7 @@
 
   {
     G1PrepareEvacuationTask g1_prep_task(this);
-    Tickspan task_time = run_task(&g1_prep_task);
+    Tickspan task_time = run_task_timed(&g1_prep_task);
 
     phase_times()->record_register_regions(task_time.seconds() * 1000.0,
                                            g1_prep_task.humongous_total(),
@@ -3843,7 +3872,7 @@
   {
     G1RootProcessor root_processor(this, num_workers);
     G1EvacuateRegionsTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, num_workers);
-    task_time = run_task(&g1_par_task);
+    task_time = run_task_timed(&g1_par_task);
     // Closing the inner scope will execute the destructor for the G1RootProcessor object.
     // To extract its code root fixup time we measure total time of this scope and
     // subtract from the time the WorkGang task took.
@@ -3882,7 +3911,7 @@
   {
     G1MarkScope code_mark_scope;
     G1EvacuateOptionalRegionsTask task(per_thread_states, _task_queues, workers()->active_workers());
-    task_time = run_task(&task);
+    task_time = run_task_timed(&task);
     // See comment in evacuate_collection_set() for the reason of the scope.
   }
   Tickspan total_processing = Ticks::now() - start_processing;
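How the new G1 pieces fit together: run_task() submits an AbstractGangTask to the WorkGang, run_task_timed() additionally reports the elapsed time for the evacuation phases shown above, and G1ParallelObjectIterator's shared HeapRegionClaimer hands out disjoint regions to workers so every object is visited exactly once. A minimal sketch of the intended calling pattern — ObjectCountClosure, ObjectCountTask and the total counter are illustrative names, not part of the change; only parallel_object_iterator(), object_iterate() and run_task() come from this patch:

class ObjectCountClosure : public ObjectClosure {
  size_t _count;
public:
  ObjectCountClosure() : _count(0) {}
  virtual void do_object(oop obj) { _count++; }
  size_t count() const { return _count; }
};

class ObjectCountTask : public AbstractGangTask {
  ParallelObjectIterator* _poi;
  volatile size_t         _total;
public:
  ObjectCountTask(ParallelObjectIterator* poi) :
    AbstractGangTask("Object Count"), _poi(poi), _total(0) {}
  virtual void work(uint worker_id) {
    ObjectCountClosure cl;
    // Each worker claims unvisited regions through the iterator's shared claimer.
    _poi->object_iterate(&cl, worker_id);
    Atomic::add(&_total, cl.count());
  }
  size_t total() const { return _total; }
};

// ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(num_workers);
// ObjectCountTask task(poi);
// Universe::heap()->run_task(&task);   // each gang worker executes work(worker_id)
// delete poi;

Passing a thread_num of 0 sizes the claimer for the gang's currently active workers.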
--- old/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	2020-08-11 22:49:17.903971178 +0800
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	2020-08-11 22:49:17.723971756 +0800
@@ -551,9 +551,12 @@
 
   WorkGang* workers() const { return _workers; }
 
-  // Runs the given AbstractGangTask with the current active workers, returning the
-  // total time taken.
-  Tickspan run_task(AbstractGangTask* task);
+  // Runs the given AbstractGangTask with the current active workers.
+  virtual void run_task(AbstractGangTask* task);
+
+  // Runs the given AbstractGangTask with the current active workers,
+  // returning the total time taken.
+  Tickspan run_task_timed(AbstractGangTask* task);
 
   G1Allocator* allocator() {
     return _allocator;
@@ -1173,9 +1176,13 @@
 
   // Iteration functions.
 
+  void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer);
+
   // Iterate over all objects, calling "cl.do_object" on each.
   virtual void object_iterate(ObjectClosure* cl);
 
+  virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num);
+
   // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
   virtual void keep_alive(oop obj);
 
--- old/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	2020-08-11 22:49:18.779968373 +0800
+++ new/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	2020-08-11 22:49:18.595968962 +0800
@@ -539,7 +539,6 @@
   old_gen()->object_iterate(cl);
 }
 
-
 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
   if (young_gen()->is_in_reserved(addr)) {
     assert(young_gen()->is_in(addr),
@@ -611,6 +610,10 @@
   ParallelScavengeHeap::heap()->workers().threads_do(tc);
 }
 
+void ParallelScavengeHeap::run_task(AbstractGangTask* task) {
+  _workers.run_task(task);
+}
+
 void ParallelScavengeHeap::print_tracing_info() const {
   AdaptiveSizePolicyOutput::print();
   log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
--- old/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	2020-08-11 22:49:19.639965619 +0800
+++ new/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	2020-08-11 22:49:19.479966131 +0800
@@ -218,6 +218,8 @@
   virtual void print_on(outputStream* st) const;
   virtual void print_on_error(outputStream* st) const;
   virtual void gc_threads_do(ThreadClosure* tc) const;
+  // Runs the given AbstractGangTask with the current active workers.
+  virtual void run_task(AbstractGangTask* task);
   virtual void print_tracing_info() const;
 
   virtual WorkGang* get_safepoint_workers() { return &_workers; }
--- old/src/hotspot/share/gc/serial/serialHeap.cpp	2020-08-11 22:49:20.459962992 +0800
+++ new/src/hotspot/share/gc/serial/serialHeap.cpp	2020-08-11 22:49:20.291963529 +0800
@@ -87,3 +87,8 @@
   memory_pools.append(_old_pool);
   return memory_pools;
 }
+
+// No workGang for SerialHeap, work serially with thread 0.
+void SerialHeap::run_task(AbstractGangTask* task) {
+  task->work(0);
+}
--- old/src/hotspot/share/gc/serial/serialHeap.hpp	2020-08-11 22:49:21.319960237 +0800
+++ new/src/hotspot/share/gc/serial/serialHeap.hpp	2020-08-11 22:49:21.139960814 +0800
@@ -75,6 +75,10 @@
   template <typename OopClosureType1, typename OopClosureType2>
   void oop_since_save_marks_iterate(OopClosureType1* cur,
                                     OopClosureType2* older);
+
+  // Runs the given AbstractGangTask with the current active workers.
+  // No workGang for SerialHeap, work serially with thread 0.
+  virtual void run_task(AbstractGangTask* task);
 };
 
 #endif // SHARE_GC_SERIAL_SERIALHEAP_HPP
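Epsilon and Serial (collectors without a WorkGang) satisfy the new run_task() contract by invoking the task body once on the calling thread. That is sound because a well-formed AbstractGangTask derives its share of the work from the worker id and the worker count it was created for, so with a single worker, worker 0 simply claims everything. A sketch under that assumption (StripeTask and its chunk partitioning are hypothetical, not part of the change):

class StripeTask : public AbstractGangTask {
  uint   _n_workers;   // number of workers this task was sized for
  size_t _n_chunks;    // total units of work
public:
  StripeTask(uint n_workers, size_t n_chunks) :
    AbstractGangTask("Stripe"), _n_workers(n_workers), _n_chunks(n_chunks) {}
  virtual void work(uint worker_id) {
    // Worker i handles chunks i, i + _n_workers, i + 2 * _n_workers, ...
    for (size_t i = worker_id; i < _n_chunks; i += _n_workers) {
      // process chunk i
    }
    // With no WorkGang the heap calls work(0) and _n_workers is 1, so this
    // loop degenerates to visiting every chunk serially on the caller.
  }
};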
--- old/src/hotspot/share/gc/shared/collectedHeap.hpp	2020-08-11 22:49:22.171957509 +0800
+++ new/src/hotspot/share/gc/shared/collectedHeap.hpp	2020-08-11 22:49:22.011958021 +0800
@@ -29,6 +29,7 @@
 #include "gc/shared/gcWhen.hpp"
 #include "gc/shared/verifyOption.hpp"
 #include "memory/allocation.hpp"
+#include "memory/heapInspection.hpp"
 #include "memory/universe.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/perfData.hpp"
@@ -44,6 +45,7 @@
 // class defines the functions that a heap must implement, and contains
 // infrastructure common to all heaps.
 
+class AbstractGangTask;
 class AdaptiveSizePolicy;
 class BarrierSet;
 class GCHeapSummary;
@@ -85,6 +87,11 @@
   }
 };
 
+class ParallelObjectIterator : public CHeapObj<mtGC> {
+public:
+  virtual void object_iterate(ObjectClosure* cl, uint worker_id) = 0;
+};
+
 //
 // CollectedHeap
 //   GenCollectedHeap
@@ -407,6 +414,10 @@
   // Iterate over all objects, calling "cl.do_object" on each.
   virtual void object_iterate(ObjectClosure* cl) = 0;
 
+  virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num) {
+    return NULL;
+  }
+
   // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
   virtual void keep_alive(oop obj) {}
 
@@ -456,6 +467,9 @@
   // Iterator for all GC threads (other than VM thread)
   virtual void gc_threads_do(ThreadClosure* tc) const = 0;
 
+  // Run given task. Possibly in parallel if the GC supports it.
+  virtual void run_task(AbstractGangTask* task) = 0;
+
   // Print any relevant tracing info that flags imply.
   // Default implementation does nothing.
   virtual void print_tracing_info() const = 0;
--- old/src/hotspot/share/gc/shared/gcVMOperations.cpp	2020-08-11 22:49:23.015954805 +0800
+++ new/src/hotspot/share/gc/shared/gcVMOperations.cpp	2020-08-11 22:49:22.831955395 +0800
@@ -149,7 +149,7 @@
     }
   }
   HeapInspection inspect;
-  inspect.heap_inspection(_out);
+  inspect.heap_inspection(_out, _parallel_thread_num);
 }
 
--- old/src/hotspot/share/gc/shared/gcVMOperations.hpp	2020-08-11 22:49:23.899951975 +0800
+++ new/src/hotspot/share/gc/shared/gcVMOperations.hpp	2020-08-11 22:49:23.723952539 +0800
@@ -125,12 +125,15 @@
  private:
   outputStream* _out;
   bool _full_gc;
+  uint _parallel_thread_num;
  public:
-  VM_GC_HeapInspection(outputStream* out, bool request_full_gc) :
+  VM_GC_HeapInspection(outputStream* out, bool request_full_gc,
+                       uint parallel_thread_num = 1) :
     VM_GC_Operation(0 /* total collections,      dummy, ignored */,
                     GCCause::_heap_inspection /* GC Cause */,
                     0 /* total full collections, dummy, ignored */,
-                    request_full_gc), _out(out), _full_gc(request_full_gc) {}
+                    request_full_gc), _out(out), _full_gc(request_full_gc),
+                    _parallel_thread_num(parallel_thread_num) {}
 
   ~VM_GC_HeapInspection() {}
   virtual VMOp_Type type() const { return VMOp_GC_HeapInspection; }
--- old/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	2020-08-11 22:49:24.735949298 +0800
+++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	2020-08-11 22:49:24.567949836 +0800
@@ -1195,6 +1195,10 @@
   }
 }
 
+void ShenandoahHeap::run_task(AbstractGangTask* task) {
+  workers()->run_task(task, workers()->active_workers());
+}
+
 void ShenandoahHeap::print_tracing_info() const {
   LogTarget(Info, gc, stats) lt;
   if (lt.is_enabled()) {
--- old/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp	2020-08-11 22:49:25.627946443 +0800
+++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp	2020-08-11 22:49:25.451947005 +0800
@@ -198,6 +198,8 @@
   WorkGang* get_safepoint_workers();
 
   void gc_threads_do(ThreadClosure* tcl) const;
+  // Runs the given AbstractGangTask with the current active workers.
+  virtual void run_task(AbstractGangTask* task);
 
 // ---------- Heap regions handling machinery
 //
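The default CollectedHeap::parallel_object_iterator() deliberately returns NULL, so collectors that cannot (or choose not to) partition their heap opt out without further changes; callers are expected to test for NULL and fall back to the serial object_iterate(), which is exactly what HeapInspection::populate_table() does later in this patch. A sketch of that caller-side contract (MyClosure and the requested worker count are placeholders, not part of the change):

// MyClosure is a stand-in for any ObjectClosure implementation.
MyClosure cl;
ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(4 /* requested workers */);
if (poi != NULL) {
  // Wrap poi in an AbstractGangTask and hand it to CollectedHeap::run_task();
  // each worker then calls poi->object_iterate(&cl, worker_id).
  delete poi;
} else {
  // No parallel support in this collector: visit every object serially.
  Universe::heap()->object_iterate(&cl);
}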
--- old/src/hotspot/share/gc/z/zCollectedHeap.cpp	2020-08-11 22:49:26.431943867 +0800
+++ new/src/hotspot/share/gc/z/zCollectedHeap.cpp	2020-08-11 22:49:26.267944393 +0800
@@ -253,6 +253,10 @@
   _heap.object_iterate(cl, true /* visit_weaks */);
 }
 
+void ZCollectedHeap::run_task(AbstractGangTask* task) {
+  return _heap.run_task(task);
+}
+
 void ZCollectedHeap::keep_alive(oop obj) {
   _heap.keep_alive(obj);
 }
--- old/src/hotspot/share/gc/z/zCollectedHeap.hpp	2020-08-11 22:49:27.247941255 +0800
+++ new/src/hotspot/share/gc/z/zCollectedHeap.hpp	2020-08-11 22:49:27.067941831 +0800
@@ -98,6 +98,8 @@
 
   virtual void object_iterate(ObjectClosure* cl);
 
+  virtual void run_task(AbstractGangTask* task);
+
   virtual void keep_alive(oop obj);
 
   virtual void register_nmethod(nmethod* nm);
--- old/src/hotspot/share/gc/z/zHeap.cpp	2020-08-11 22:49:28.079938592 +0800
+++ new/src/hotspot/share/gc/z/zHeap.cpp	2020-08-11 22:49:27.907939142 +0800
@@ -35,6 +35,7 @@
 #include "gc/z/zRelocationSetSelector.inline.hpp"
 #include "gc/z/zResurrection.hpp"
 #include "gc/z/zStat.hpp"
+#include "gc/z/zTask.hpp"
 #include "gc/z/zThread.inline.hpp"
 #include "gc/z/zVerify.hpp"
 #include "gc/z/zWorkers.inline.hpp"
@@ -185,6 +186,26 @@
   _workers.threads_do(tc);
 }
 
+// Adapter class from AbstractGangTask to ZTask
+class ZAbstractGangTaskAdapter : public ZTask {
+private:
+  AbstractGangTask* _task;
+
+public:
+  ZAbstractGangTaskAdapter(AbstractGangTask* task) :
+      ZTask(task->name()),
+      _task(task) { }
+
+  virtual void work() {
+    _task->work(ZThread::worker_id());
+  }
+};
+
+void ZHeap::run_task(AbstractGangTask* task) {
+  ZAbstractGangTaskAdapter ztask(task);
+  _workers.run_parallel(&ztask);
+}
+
 void ZHeap::out_of_memory() {
   ResourceMark rm;
 
--- old/src/hotspot/share/gc/z/zHeap.hpp	2020-08-11 22:49:28.911935928 +0800
+++ new/src/hotspot/share/gc/z/zHeap.hpp	2020-08-11 22:49:28.731936504 +0800
@@ -98,6 +98,7 @@
   uint nconcurrent_no_boost_worker_threads() const;
   void set_boost_worker_threads(bool boost);
   void threads_do(ThreadClosure* tc) const;
+  void run_task(AbstractGangTask* task);
 
   // Reference processing
   ReferenceDiscoverer* reference_discoverer();
--- old/src/hotspot/share/memory/heapInspection.cpp	2020-08-11 22:49:29.763933201 +0800
+++ new/src/hotspot/share/memory/heapInspection.cpp	2020-08-11 22:49:29.603933714 +0800
@@ -35,6 +35,7 @@
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/reflectionAccessorImplKlassHelper.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
@@ -237,6 +238,41 @@
   return _size_of_instances_in_words;
 }
 
+// Return false if the entry could not be recorded on account
+// of running out of space required to create a new entry.
+bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
+  Klass*          k = cie->klass();
+  KlassInfoEntry* elt = lookup(k);
+  // elt may be NULL if it's a new klass for which we
+  // could not allocate space for a new entry in the hashtable.
+  if (elt != NULL) {
+    elt->set_count(elt->count() + cie->count());
+    elt->set_words(elt->words() + cie->words());
+    _size_of_instances_in_words += cie->words();
+    return true;
+  }
+  return false;
+}
+
+class KlassInfoTableMergeClosure : public KlassInfoClosure {
+private:
+  KlassInfoTable* _dest;
+  bool _success;
+public:
+  KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
+  void do_cinfo(KlassInfoEntry* cie) {
+    _success &= _dest->merge_entry(cie);
+  }
+  bool success() { return _success; }
+};
+
+// Merge the contents of the given table into this one.
+bool KlassInfoTable::merge(KlassInfoTable* table) {
+  KlassInfoTableMergeClosure closure(this);
+  table->iterate(&closure);
+  return closure.success();
+}
+
 int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
   return (*e1)->compare(*e1,*e2);
 }
@@ -482,7 +518,7 @@
 class RecordInstanceClosure : public ObjectClosure {
  private:
   KlassInfoTable* _cit;
-  size_t _missed_count;
+  uintx _missed_count;
   BoolObjectClosure* _filter;
  public:
   RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
@@ -496,7 +532,7 @@
     }
   }
 
-  size_t missed_count() { return _missed_count; }
+  uintx missed_count() { return _missed_count; }
 
  private:
   bool should_visit(oop obj) {
@@ -504,23 +540,68 @@
   }
 };
 
-size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) {
-  ResourceMark rm;
+// Heap inspection work performed by each worker.
+// If a native OOM happens while allocating the per-worker KlassInfoTable, set _success to false.
+void ParHeapInspectTask::work(uint worker_id) {
+  uintx missed_count = 0;
+  bool merge_success = true;
+  if (!Atomic::load(&_success)) {
+    // Another worker has already failed the parallel iteration.
+    return;
+  }
+
+  KlassInfoTable cit(false);
+  if (cit.allocation_failed()) {
+    // Failed to allocate memory; stop the parallel iteration.
+    Atomic::store(&_success, false);
+    return;
+  }
+  RecordInstanceClosure ric(&cit, _filter);
+  _poi->object_iterate(&ric, worker_id);
+  missed_count = ric.missed_count();
+  {
+    MutexLocker x(&_mutex);
+    merge_success = _shared_cit->merge(&cit);
+  }
+  if (merge_success) {
+    Atomic::add(&_missed_count, missed_count);
+  } else {
+    Atomic::store(&_success, false);
+  }
+}
+uintx HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, uint parallel_thread_num) {
+
+  // Try parallel first.
+  if (parallel_thread_num > 1) {
+    ResourceMark rm;
+    ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
+    if (poi != NULL) {
+      ParHeapInspectTask task(poi, cit, filter);
+      Universe::heap()->run_task(&task);
+      delete poi;
+      if (task.success()) {
+        return task.missed_count();
+      }
+    }
+  }
+
+  ResourceMark rm;
+  // If no parallel iteration is available, run serially.
   RecordInstanceClosure ric(cit, filter);
   Universe::heap()->object_iterate(&ric);
   return ric.missed_count();
 }
 
-void HeapInspection::heap_inspection(outputStream* st) {
+void HeapInspection::heap_inspection(outputStream* st, uint parallel_thread_num) {
   ResourceMark rm;
 
   KlassInfoTable cit(false);
   if (!cit.allocation_failed()) {
     // populate table with object allocation info
-    size_t missed_count = populate_table(&cit);
+    uintx missed_count = populate_table(&cit, NULL, parallel_thread_num);
     if (missed_count != 0) {
-      log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
+      log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " UINTX_FORMAT
                                " total instances in data below",
                                missed_count);
     }
--- old/src/hotspot/share/memory/heapInspection.hpp	2020-08-11 22:49:30.643930385 +0800
+++ new/src/hotspot/share/memory/heapInspection.hpp	2020-08-11 22:49:30.467930949 +0800
@@ -30,6 +30,9 @@
 #include "oops/oop.hpp"
 #include "oops/annotations.hpp"
 #include "utilities/macros.hpp"
+#include "gc/shared/workgroup.hpp"
+
+class ParallelObjectIterator;
 
 #if INCLUDE_SERVICES
 
@@ -122,6 +125,8 @@
   void iterate(KlassInfoClosure* cic);
   bool allocation_failed() { return _buckets == NULL; }
   size_t size_of_instances_in_words() const;
+  bool merge(KlassInfoTable* table);
+  bool merge_entry(const KlassInfoEntry* cie);
 
   friend class KlassInfoHisto;
   friend class KlassHierarchy;
@@ -211,11 +216,46 @@
 
 class HeapInspection : public StackObj {
  public:
-  void heap_inspection(outputStream* st) NOT_SERVICES_RETURN;
-  size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL) NOT_SERVICES_RETURN_(0);
+  void heap_inspection(outputStream* st, uint parallel_thread_num = 1) NOT_SERVICES_RETURN;
+  uintx populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL, uint parallel_thread_num = 1) NOT_SERVICES_RETURN_(0);
   static void find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) NOT_SERVICES_RETURN;
  private:
   void iterate_over_heap(KlassInfoTable* cit, BoolObjectClosure* filter = NULL);
 };
 
+// Parallel heap inspection task. Parallel inspection can fail due to
+// a native OOM when allocating memory for the per-thread KlassInfoTable.
+// In that case _success is set to false and a serial inspection is tried instead.
+class ParHeapInspectTask : public AbstractGangTask {
+ private:
+  ParallelObjectIterator* _poi;
+  KlassInfoTable* _shared_cit;
+  BoolObjectClosure* _filter;
+  uintx _missed_count;
+  bool _success;
+  Mutex _mutex;
+
+ public:
+  ParHeapInspectTask(ParallelObjectIterator* poi,
+                     KlassInfoTable* shared_cit,
+                     BoolObjectClosure* filter) :
+      AbstractGangTask("Iterating heap"),
+      _poi(poi),
+      _shared_cit(shared_cit),
+      _filter(filter),
+      _missed_count(0),
+      _success(true),
+      _mutex(Mutex::leaf, "Parallel heap iteration data merge lock") {}
+
+  uintx missed_count() const {
+    return _missed_count;
+  }
+
+  bool success() {
+    return _success;
+  }
+
+  virtual void work(uint worker_id);
+};
+
 #endif // SHARE_MEMORY_HEAPINSPECTION_HPP
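Both populate_table() and ParHeapInspectTask accept an optional BoolObjectClosure; when it is non-NULL, only objects for which do_object_b() answers true are counted, and the same filter instance is shared by all workers, so it must be stateless or thread-safe. A small sketch of such a filter (OnlyKlassClosure and the caller snippet are illustrative, not part of the change):

class OnlyKlassClosure : public BoolObjectClosure {
  Klass* _k;
public:
  OnlyKlassClosure(Klass* k) : _k(k) {}
  virtual bool do_object_b(oop obj) { return obj->klass() == _k; }
};

// KlassInfoTable cit(false);
// OnlyKlassClosure filter(some_klass);
// HeapInspection hi;
// uintx missed = hi.populate_table(&cit, &filter, 4 /* parallel_thread_num */);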
--- old/src/hotspot/share/runtime/arguments.hpp	2020-08-11 22:49:31.451927798 +0800
+++ new/src/hotspot/share/runtime/arguments.hpp	2020-08-11 22:49:31.291928310 +0800
@@ -449,12 +449,6 @@
   static ArgsRange check_memory_size(julong size, julong min_size, julong max_size);
   static ArgsRange parse_memory_size(const char* s, julong* long_arg,
                                      julong min_size, julong max_size = max_uintx);
-  // Parse a string for a unsigned integer.  Returns true if value
-  // is an unsigned integer greater than or equal to the minimum
-  // parameter passed and returns the value in uintx_arg.  Returns
-  // false otherwise, with uintx_arg undefined.
-  static bool parse_uintx(const char* value, uintx* uintx_arg,
-                          uintx min_size);
 
   // methods to build strings from individual args
   static void build_jvm_args(const char* arg);
@@ -498,6 +492,12 @@
  public:
   // Parses the arguments, first phase
   static jint parse(const JavaVMInitArgs* args);
+  // Parse a string for an unsigned integer.  Returns true if value
+  // is an unsigned integer greater than or equal to the minimum
+  // parameter passed and returns the value in uintx_arg.  Returns
+  // false otherwise, with uintx_arg undefined.
+  static bool parse_uintx(const char* value, uintx* uintx_arg,
+                          uintx min_size);
   // Apply ergonomics
   static jint apply_ergo();
   // Adjusts the arguments after the OS have adjusted the arguments
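Arguments::parse_uintx() is moved to the public section solely so the attach handler below can validate the incoming thread count; per its own comment it returns true only for an unsigned integer greater than or equal to min_size and leaves the out-parameter undefined on failure. For example (the literal values are illustrative):

uintx num;
if (Arguments::parse_uintx("4", &num, 0)) {
  // num == 4
} else {
  // the string did not parse, or was below the minimum; num is undefined
}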
--- old/src/hotspot/share/services/attachListener.cpp	2020-08-11 22:49:32.335924969 +0800
+++ new/src/hotspot/share/services/attachListener.cpp	2020-08-11 22:49:32.163925519 +0800
@@ -248,11 +248,13 @@
 // Input arguments :-
 //   arg0: "-live" or "-all"
 //   arg1: Name of the dump file or NULL
+//   arg2: parallel thread number
 static jint heap_inspection(AttachOperation* op, outputStream* out) {
   bool live_objects_only = true;   // default is true to retain the behavior before this change is made
   outputStream* os = out;   // if path not specified or path is NULL, use out
   fileStream* fs = NULL;
   const char* arg0 = op->arg(0);
+  uint parallel_thread_num = MAX2<uint>(1, (uint)os::initial_active_processor_count() * 3 / 8);
   if (arg0 != NULL && (strlen(arg0) > 0)) {
     if (strcmp(arg0, "-all") != 0 && strcmp(arg0, "-live") != 0) {
       out->print_cr("Invalid argument to inspectheap operation: %s", arg0);
@@ -262,21 +264,26 @@
   }
 
   const char* path = op->arg(1);
-  if (path != NULL) {
-    if (path[0] == '\0') {
-      out->print_cr("No dump file specified");
-    } else {
-      // create file
-      fs = new (ResourceObj::C_HEAP, mtInternal) fileStream(path);
-      if (fs == NULL) {
-        out->print_cr("Failed to allocate space for file: %s", path);
-        return JNI_ERR;
-      }
-      os = fs;
+  if (path != NULL && path[0] != '\0') {
+    // create file
+    fs = new (ResourceObj::C_HEAP, mtInternal) fileStream(path);
+    if (fs == NULL) {
+      out->print_cr("Failed to allocate space for file: %s", path);
     }
+    os = fs;
   }
-  }
 
-  VM_GC_HeapInspection heapop(os, live_objects_only /* request full gc */);
+  const char* num_str = op->arg(2);
+  if (num_str != NULL && num_str[0] != '\0') {
+    uintx num;
+    if (!Arguments::parse_uintx(num_str, &num, 0)) {
+      out->print_cr("Invalid parallel thread number: [%s]", num_str);
+      return JNI_ERR;
+    }
+    parallel_thread_num = num == 0 ? parallel_thread_num : (uint)num;
+  }
+
+  VM_GC_HeapInspection heapop(os, live_objects_only /* request full gc */, parallel_thread_num);
   VMThread::execute(&heapop);
   if (os != NULL && os != out) {
     out->print_cr("Heap inspection file created: %s", path);
--- old/src/jdk.jcmd/share/classes/sun/tools/jmap/JMap.java	2020-08-11 22:49:33.063922639 +0800
+++ new/src/jdk.jcmd/share/classes/sun/tools/jmap/JMap.java	2020-08-11 22:49:32.887923202 +0800
@@ -169,6 +169,7 @@
                UnsupportedEncodingException {
         String liveopt = "-all";
         String filename = null;
+        String parallel = null;
         String subopts[] = options.split(",");
 
         for (int i = 0; i < subopts.length; i++) {
@@ -180,9 +181,17 @@
             } else if (subopt.startsWith("file=")) {
                 filename = parseFileName(subopt);
                 if (filename == null) {
-                    usage(1); // invalid options or no filename
+                    System.err.println("Fail: invalid option or no file name '" + subopt + "'");
+                    usage(1);
                 }
+            } else if (subopt.startsWith("parallel=")) {
+                parallel = subopt.substring("parallel=".length());
+                if (parallel == null) {
+                    System.err.println("Fail: no number provided in option: '" + subopt + "'");
+                    usage(1);
+                }
             } else {
+                System.err.println("Fail: invalid option: '" + subopt + "'");
                 usage(1);
             }
         }
@@ -190,7 +199,7 @@
         System.out.flush();
 
         // inspectHeap is not the same as jcmd GC.class_histogram
-        executeCommandForPid(pid, "inspectheap", liveopt, filename);
+        executeCommandForPid(pid, "inspectheap", liveopt, filename, parallel);
     }
 
     private static void dump(String pid, String options)
@@ -211,7 +220,8 @@
         }
 
         if (filename == null) {
-            usage(1); // invalid options or no filename
+            System.err.println("Fail: invalid option or no file name");
+            usage(1);
         }
 
         // dumpHeap is not the same as jcmd GC.heap_dump
@@ -287,6 +297,10 @@
         System.err.println("    live         count only live objects");
         System.err.println("    all          count all objects in the heap (default if one of \"live\" or \"all\" is not specified)");
        System.err.println("    file=<file>  dump data to <file>");
+        System.err.println("    parallel=<number>  number of parallel threads to use for heap iteration:");
+        System.err.println("                  parallel=0 default behavior, use predefined number of threads");
+        System.err.println("                  parallel=1 disable parallel heap iteration");
+        System.err.println("                  parallel=<N> use N threads for parallel heap iteration");
         System.err.println("");
         System.err.println("    Example: jmap -histo:live,file=/tmp/histo.data <pid>");
         System.exit(exit);
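End to end, the new suboption flows from jmap through the attach protocol's third inspectheap argument into VM_GC_HeapInspection. A typical invocation (the PID and thread count are examples):

    jmap -histo:live,parallel=4,file=/tmp/histo.data <pid>

parallel=0 keeps the default chosen in attachListener.cpp (roughly 3/8 of the initially active processors), parallel=1 forces serial iteration, and any larger value requests that many workers, subject to the collector actually providing a ParallelObjectIterator.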
--- old/test/jdk/sun/tools/jmap/BasicJMapTest.java	2020-08-11 22:49:33.735920489 +0800
+++ new/test/jdk/sun/tools/jmap/BasicJMapTest.java	2020-08-11 22:49:33.575921001 +0800
@@ -79,40 +79,68 @@
         output.shouldHaveExitValue(0);
     }
 
+    private static void testHistoParallelZero() throws Exception {
+        OutputAnalyzer output = jmap("-histo:parallel=0");
+        output.shouldHaveExitValue(0);
+    }
+
+    private static void testHistoParallel() throws Exception {
+        OutputAnalyzer output = jmap("-histo:parallel=2");
+        output.shouldHaveExitValue(0);
+    }
+
+    private static void testHistoNonParallel() throws Exception {
+        OutputAnalyzer output = jmap("-histo:parallel=1");
+        output.shouldHaveExitValue(0);
+    }
+
     private static void testHistoToFile() throws Exception {
-        histoToFile(false);
+        histoToFile(false, false, 1);
     }
 
     private static void testHistoLiveToFile() throws Exception {
-        histoToFile(true);
+        histoToFile(true, false, 1);
     }
 
     private static void testHistoAllToFile() throws Exception {
-        boolean explicitAll = true;
-        histoToFile(false, explicitAll);
+        histoToFile(false, true, 1);
    }
 
-    private static void histoToFile(boolean live) throws Exception {
-        boolean explicitAll = false;
-        histoToFile(live, explicitAll);
+    private static void testHistoFileParallelZero() throws Exception {
+        histoToFile(false, false, 0);
     }
 
-    private static void histoToFile(boolean live, boolean explicitAll) throws Exception {
-        if (live == true && explicitAll == true) {
+    private static void testHistoFileParallel() throws Exception {
+        histoToFile(false, false, 2);
+    }
+
+    private static void histoToFile(boolean live,
+                                    boolean explicitAll,
+                                    int parallelThreadNum) throws Exception {
+        String liveArg = "";
+        String fileArg = "";
+        String parArg = "parallel=" + parallelThreadNum;
+        String allArgs = "-histo:";
+
+        if (live && explicitAll) {
             fail("Illegal argument setting for jmap -histo");
         }
+        if (live) {
+            liveArg = "live,";
+        }
+        if (explicitAll) {
+            liveArg = "all,";
+        }
+
         File file = new File("jmap.histo.file" + System.currentTimeMillis() + ".histo");
         if (file.exists()) {
             file.delete();
         }
+        fileArg = "file=" + file.getName();
+
         OutputAnalyzer output;
-        if (live) {
-            output = jmap("-histo:live,file=" + file.getName());
-        } else if (explicitAll == true) {
-            output = jmap("-histo:all,file=" + file.getName());
-        } else {
-            output = jmap("-histo:file=" + file.getName());
-        }
+        allArgs = allArgs + liveArg + fileArg + ',' + parArg;
+        output = jmap(allArgs);
         output.shouldHaveExitValue(0);
         output.shouldContain("Heap inspection file created");
         file.delete();
@@ -129,43 +157,45 @@
     }
 
     private static void testDump() throws Exception {
-        dump(false);
+        dump(false, false);
     }
 
     private static void testDumpLive() throws Exception {
-        dump(true);
+        dump(true, false);
     }
 
     private static void testDumpAll() throws Exception {
-        boolean explicitAll = true;
-        dump(false, explicitAll);
-    }
-
-    private static void dump(boolean live) throws Exception {
-        boolean explicitAll = false;
-        dump(live, explicitAll);
+        dump(false, true);
     }
 
     private static void dump(boolean live, boolean explicitAll) throws Exception {
-        if (live == true && explicitAll == true) {
-            fail("Illegal argument setting for jmap -dump");
-        }
-        File dump = new File("jmap.dump." + System.currentTimeMillis() + ".hprof");
-        if (dump.exists()) {
-            dump.delete();
+        String liveArg = "";
+        String fileArg = "";
+        String allArgs = "-dump:";
+
+        if (live && explicitAll) {
+            fail("Illegal argument setting for jmap -dump");
         }
-        OutputAnalyzer output;
         if (live) {
-            output = jmap("-dump:live,format=b,file=" + dump.getName());
-        } else if (explicitAll == true) {
-            output = jmap("-dump:all,format=b,file=" + dump.getName());
-        } else {
-            output = jmap("-dump:format=b,file=" + dump.getName());
+            liveArg = "live,";
+        }
+        if (explicitAll) {
+            liveArg = "all,";
         }
+
+        File file = new File("jmap.dump" + System.currentTimeMillis() + ".hprof");
+        if (file.exists()) {
+            file.delete();
+        }
+        fileArg = "file=" + file.getName();
+
+        OutputAnalyzer output;
+        allArgs = allArgs + liveArg + "format=b," + fileArg;
+        output = jmap(allArgs);
         output.shouldHaveExitValue(0);
         output.shouldContain("Heap dump file created");
-        verifyDumpFile(dump);
-        dump.delete();
+        verifyDumpFile(file);
+        file.delete();
     }
 
     private static void verifyDumpFile(File dump) {
@@ -195,5 +225,4 @@
 
         return output;
     }
-
 }