--- old/src/hotspot/share/gc/epsilon/epsilonHeap.cpp 2020-04-25 20:53:27.567352441 +0800 +++ new/src/hotspot/share/gc/epsilon/epsilonHeap.cpp 2020-04-25 20:53:27.399352953 +0800 @@ -296,6 +296,13 @@ _space->object_iterate(cl); } +// No WorkGang in EpsilonHeap, so run the task serially with worker id 0 +Tickspan EpsilonHeap::run_task(AbstractGangTask* task) { + Ticks start = Ticks::now(); + task->work(0); + return Ticks::now() - start; +} + void EpsilonHeap::print_on(outputStream *st) const { st->print_cr("Epsilon Heap"); --- old/src/hotspot/share/gc/epsilon/epsilonHeap.hpp 2020-04-25 20:53:28.167350609 +0800 +++ new/src/hotspot/share/gc/epsilon/epsilonHeap.hpp 2020-04-25 20:53:27.991351146 +0800 @@ -141,6 +141,10 @@ virtual void print_tracing_info() const; virtual bool print_location(outputStream* st, void* addr) const; + // Runs the given AbstractGangTask with the current active workers, returning the + // total time taken. + virtual Tickspan run_task(AbstractGangTask* task); + private: void print_heap_info(size_t used) const; void print_metaspace_info() const; --- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-04-25 20:53:28.723348913 +0800 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-04-25 20:53:28.559349414 +0800 @@ -87,6 +87,7 @@ #include "logging/log.hpp" #include "memory/allocation.hpp" #include "memory/iterator.hpp" +#include "memory/heapInspection.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/access.inline.hpp" @@ -4978,3 +4979,27 @@ GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() { return _g1mm->memory_pools(); } + +class G1ParallelObjectIterator : public ParallelObjectIterator { +private: + G1CollectedHeap* _heap; + HeapRegionClaimer _claimer; + +public: + G1ParallelObjectIterator(uint thread_num) : + _heap(G1CollectedHeap::heap()), + _claimer(thread_num == 0 ? G1CollectedHeap::heap()->workers()->active_workers() : thread_num) {} + + virtual void object_iterate(ObjectClosure* cl, uint worker_id) { + _heap->object_iterate_parallel(cl, worker_id, &_claimer); + } +}; + +ParallelObjectIterator* G1CollectedHeap::parallel_object_iterator(uint thread_num) { + return new G1ParallelObjectIterator(thread_num); +} + +void G1CollectedHeap::object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer) { + IterateObjectClosureRegionClosure blk(cl); + heap_region_par_iterate_from_worker_offset(&blk, claimer, worker_id); +} --- old/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2020-04-25 20:53:29.343347020 +0800 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2020-04-25 20:53:29.179347521 +0800 @@ -550,7 +550,7 @@ // Runs the given AbstractGangTask with the current active workers, returning the // total time taken. - Tickspan run_task(AbstractGangTask* task); + virtual Tickspan run_task(AbstractGangTask* task); G1Allocator* allocator() { return _allocator; } @@ -1167,9 +1167,13 @@ // Iteration functions. + void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer); + // Iterate over all objects, calling "cl.do_object" on each. virtual void object_iterate(ObjectClosure* cl); + virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num); + // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
virtual void keep_alive(oop obj); --- old/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp 2020-04-25 20:53:29.935345213 +0800 +++ new/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp 2020-04-25 20:53:29.755345763 +0800 @@ -536,6 +536,12 @@ old_gen()->object_iterate(cl); } +Tickspan ParallelScavengeHeap::run_task(AbstractGangTask* task) { + Ticks start = Ticks::now(); + _workers.run_task(task); + return Ticks::now() - start; +} + HeapWord* ParallelScavengeHeap::block_start(const void* addr) const { if (young_gen()->is_in_reserved(addr)) { --- old/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp 2020-04-25 20:53:30.527343406 +0800 +++ new/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp 2020-04-25 20:53:30.367343895 +0800 @@ -258,6 +258,10 @@ WorkGang& workers() { return _workers; } + + // Runs the given AbstractGangTask with the current active workers, returning the + // total time taken. + virtual Tickspan run_task(AbstractGangTask* task); }; // Class that can be used to print information about the --- old/src/hotspot/share/gc/serial/serialHeap.cpp 2020-04-25 20:53:31.075341732 +0800 +++ new/src/hotspot/share/gc/serial/serialHeap.cpp 2020-04-25 20:53:30.911342233 +0800 @@ -90,3 +90,10 @@ memory_pools.append(_old_pool); return memory_pools; } + +// No WorkGang in SerialHeap, so run the task serially with worker id 0 +Tickspan SerialHeap::run_task(AbstractGangTask* task) { + Ticks start = Ticks::now(); + task->work(0); + return Ticks::now() - start; +} --- old/src/hotspot/share/gc/serial/serialHeap.hpp 2020-04-25 20:53:31.687339864 +0800 +++ new/src/hotspot/share/gc/serial/serialHeap.hpp 2020-04-25 20:53:31.503340425 +0800 @@ -75,6 +75,10 @@ template <typename OopClosureType1, typename OopClosureType2> void oop_since_save_marks_iterate(OopClosureType1* cur, OopClosureType2* older); + + // Runs the given AbstractGangTask with the current active workers, returning the + // total time taken. + virtual Tickspan run_task(AbstractGangTask* task); }; #endif // SHARE_GC_SERIAL_SERIALHEAP_HPP --- old/src/hotspot/share/gc/shared/collectedHeap.hpp 2020-04-25 20:53:32.259338117 +0800 +++ new/src/hotspot/share/gc/shared/collectedHeap.hpp 2020-04-25 20:53:32.075338679 +0800 @@ -29,6 +29,7 @@ #include "gc/shared/gcWhen.hpp" #include "gc/shared/verifyOption.hpp" #include "memory/allocation.hpp" +#include "memory/heapInspection.hpp" #include "runtime/handles.hpp" #include "runtime/perfData.hpp" #include "runtime/safepoint.hpp" @@ -43,6 +44,7 @@ // class defines the functions that a heap must implement, and contains // infrastructure common to all heaps. +class AbstractGangTask; class AdaptiveSizePolicy; class BarrierSet; class GCHeapSummary; @@ -84,6 +86,11 @@ } }; +class ParallelObjectIterator : public CHeapObj<mtGC> { +public: + virtual void object_iterate(ObjectClosure* cl, uint worker_id) = 0; +}; + // // CollectedHeap // GenCollectedHeap @@ -386,6 +393,13 @@ // Iterate over all objects, calling "cl.do_object" on each. virtual void object_iterate(ObjectClosure* cl) = 0; + virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num) { + return NULL; + } + + // Run the given task, in parallel if possible. + virtual Tickspan run_task(AbstractGangTask* task) = 0; + // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
virtual void keep_alive(oop obj) {} --- old/src/hotspot/share/gc/shared/gcVMOperations.cpp 2020-04-25 20:53:32.835336358 +0800 +++ new/src/hotspot/share/gc/shared/gcVMOperations.cpp 2020-04-25 20:53:32.663336884 +0800 @@ -152,7 +152,7 @@ } } HeapInspection inspect; - inspect.heap_inspection(_out); + inspect.heap_inspection(_out, _parallel_thread_num); } --- old/src/hotspot/share/gc/shared/gcVMOperations.hpp 2020-04-25 20:53:33.411334599 +0800 +++ new/src/hotspot/share/gc/shared/gcVMOperations.hpp 2020-04-25 20:53:33.247335099 +0800 @@ -125,12 +125,15 @@ private: outputStream* _out; bool _full_gc; + size_t _parallel_thread_num; public: - VM_GC_HeapInspection(outputStream* out, bool request_full_gc) : + VM_GC_HeapInspection(outputStream* out, bool request_full_gc, + size_t parallel_thread_num = 1) : VM_GC_Operation(0 /* total collections, dummy, ignored */, GCCause::_heap_inspection /* GC Cause */, 0 /* total full collections, dummy, ignored */, - request_full_gc), _out(out), _full_gc(request_full_gc) {} + request_full_gc), _out(out), _full_gc(request_full_gc), + _parallel_thread_num(parallel_thread_num) {} ~VM_GC_HeapInspection() {} virtual VMOp_Type type() const { return VMOp_GC_HeapInspection; } --- old/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp 2020-04-25 20:53:34.011332766 +0800 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp 2020-04-25 20:53:33.831333316 +0800 @@ -1314,6 +1314,12 @@ } } +Tickspan ShenandoahHeap::run_task(AbstractGangTask* task) { + Ticks start = Ticks::now(); + workers()->run_task(task, workers()->active_workers()); + return Ticks::now() - start; +} + // Keep alive an object that was loaded with AS_NO_KEEPALIVE. void ShenandoahHeap::keep_alive(oop obj) { if (is_concurrent_mark_in_progress()) { --- old/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp 2020-04-25 20:53:34.603330956 +0800 +++ new/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp 2020-04-25 20:53:34.423331507 +0800 @@ -616,6 +616,10 @@ void ensure_parsability(bool retire_tlabs); void make_parsable(bool retire_tlabs); + // Runs the given AbstractGangTask with the current active workers, returning the + // total time taken. + virtual Tickspan run_task(AbstractGangTask* task); + // ---------- Marking support // private: --- old/src/hotspot/share/gc/z/zCollectedHeap.cpp 2020-04-25 20:53:35.211329099 +0800 +++ new/src/hotspot/share/gc/z/zCollectedHeap.cpp 2020-04-25 20:53:35.031329649 +0800 @@ -247,6 +247,9 @@ _heap.object_iterate(cl, true /* visit_weaks */); } +Tickspan ZCollectedHeap::run_task(AbstractGangTask* task) { + return _heap.run_task(task); +} void ZCollectedHeap::keep_alive(oop obj) { _heap.keep_alive(obj); } --- old/src/hotspot/share/gc/z/zCollectedHeap.hpp 2020-04-25 20:53:35.823327229 +0800 +++ new/src/hotspot/share/gc/z/zCollectedHeap.hpp 2020-04-25 20:53:35.643327779 +0800 @@ -99,6 +99,10 @@ virtual void object_iterate(ObjectClosure* cl); + // Runs the given AbstractGangTask with the current active workers, returning the + // total time taken. 
+ virtual Tickspan run_task(AbstractGangTask* task); + virtual void keep_alive(oop obj); virtual void register_nmethod(nmethod* nm); --- old/src/hotspot/share/gc/z/zHeap.cpp 2020-04-25 20:53:36.407325444 +0800 +++ new/src/hotspot/share/gc/z/zHeap.cpp 2020-04-25 20:53:36.239325957 +0800 @@ -35,6 +35,7 @@ #include "gc/z/zResurrection.hpp" #include "gc/z/zStat.hpp" #include "gc/z/zThread.inline.hpp" +#include "gc/z/zTask.hpp" #include "gc/z/zVerify.hpp" #include "gc/z/zWorkers.inline.hpp" #include "logging/log.hpp" @@ -45,6 +46,7 @@ #include "runtime/thread.hpp" #include "utilities/debug.hpp" +class ZTask; static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes); static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes); static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes); @@ -206,6 +208,28 @@ _workers.print_threads_on(st); } +// A delegating ZTask that runs an AbstractGangTask on the ZGC workers. +class DelegatedZAbstractGangTask : public ZTask { + private: + AbstractGangTask* _task; + + public: + DelegatedZAbstractGangTask(AbstractGangTask* task) : + ZTask(task->name()), + _task(task) { } + + virtual void work() { + _task->work(ZThread::worker_id()); + } +}; + +Tickspan ZHeap::run_task(AbstractGangTask* task) { + Ticks start = Ticks::now(); + DelegatedZAbstractGangTask dtask(task); + _workers.run_parallel(&dtask); + return Ticks::now() - start; +} + void ZHeap::out_of_memory() { ResourceMark rm; --- old/src/hotspot/share/gc/z/zHeap.hpp 2020-04-25 20:53:36.995323647 +0800 +++ new/src/hotspot/share/gc/z/zHeap.hpp 2020-04-25 20:53:36.823324172 +0800 @@ -105,6 +105,10 @@ void set_boost_worker_threads(bool boost); void worker_threads_do(ThreadClosure* tc) const; void print_worker_threads_on(outputStream* st) const; + // Runs the given AbstractGangTask with the current active workers, returning the + // total time taken. + Tickspan run_task(AbstractGangTask* task); + // Reference processing ReferenceDiscoverer* reference_discoverer(); --- old/src/hotspot/share/memory/heapInspection.cpp 2020-04-25 20:53:37.571321886 +0800 +++ new/src/hotspot/share/memory/heapInspection.cpp 2020-04-25 20:53:37.391322436 +0800 @@ -237,6 +237,42 @@ return _size_of_instances_in_words; } +// Return false if the entry could not be recorded on account +// of running out of space required to create a new entry. +bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) { + Klass* k = cie->klass(); + KlassInfoEntry* elt = lookup(k); + // elt may be NULL if it's a new klass for which we + // could not allocate space for a new entry in the hashtable.
+ if (elt != NULL) { + elt->set_count(elt->count() + cie->count()); + elt->set_words(elt->words() + cie->words()); + _size_of_instances_in_words += cie->words(); + return true; + } else { + return false; + } +} + +class KlassInfoTableMergeClosure : public KlassInfoClosure { +private: + KlassInfoTable* _dest; + bool _success; +public: + KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {} + void do_cinfo(KlassInfoEntry* cie) { + _success &= _dest->merge_entry(cie); + } + bool is_success() { return _success; } +}; + +// Merge the entries from the given table into this one. +bool KlassInfoTable::merge(KlassInfoTable* table) { + KlassInfoTableMergeClosure closure(this); + table->iterate(&closure); + return closure.is_success(); +} + int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) { return (*e1)->compare(*e1,*e2); } @@ -504,21 +540,72 @@ } }; -size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) { +// Heap inspection task for each parallel worker. +// When a native OOM happens while allocating the per-worker KlassInfoTable, set _success to false. +// Note: atomic set/get of _success seems unnecessary because it is set to true in the constructor +// and can only be set to false here. The only risk is that a worker may keep inspecting the heap +// after another worker has set _success to false, but that is harmless: a worker never sets +// _success back to true, so _success ends up false and the serial heap inspection can be tried. +void ParHeapInspectTask::work(uint worker_id) { + size_t missed_count = 0; + if (!_success) { + // Another worker has already failed the parallel iteration. + return; + } + + KlassInfoTable cit(false); + if (!cit.allocation_failed()) { + RecordInstanceClosure ric(&cit, _filter); + _poi->object_iterate(&ric, worker_id); + missed_count = ric.missed_count(); + } else { + // Failed to allocate memory, stop the parallel inspection. + _success = false; + return; + } + { + MutexLocker x(&_mutex); + + if (!_shared_cit->merge(&cit)) { + _success = false; + return; + } + _shared_missed_count += missed_count; + } +} + +size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, size_t parallel_thread_num) { ResourceMark rm; + // Try parallel first. + if (parallel_thread_num > 1) { + ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num); + if (poi != NULL) { + ParHeapInspectTask task(poi, cit, filter); + Universe::heap()->run_task(&task); + delete poi; + if (task.success()) { + return task.missed_count(); + } + } + } + + // If no parallel iteration is available, run serially.
RecordInstanceClosure ric(cit, filter); Universe::heap()->object_iterate(&ric); return ric.missed_count(); } -void HeapInspection::heap_inspection(outputStream* st) { +void HeapInspection::heap_inspection(outputStream* st, size_t parallel_thread_num) { ResourceMark rm; KlassInfoTable cit(false); if (!cit.allocation_failed()) { + size_t missed_count = 0; // populate table with object allocation info - size_t missed_count = populate_table(&cit); + missed_count = populate_table(&cit, NULL, parallel_thread_num); if (missed_count != 0) { log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT " total instances in data below", --- old/src/hotspot/share/memory/heapInspection.hpp 2020-04-25 20:53:38.139320150 +0800 +++ new/src/hotspot/share/memory/heapInspection.hpp 2020-04-25 20:53:37.959320699 +0800 @@ -30,6 +30,9 @@ #include "oops/oop.hpp" #include "oops/annotations.hpp" #include "utilities/macros.hpp" +#include "gc/shared/workgroup.hpp" + +class ParallelObjectIterator; #if INCLUDE_SERVICES @@ -122,6 +125,8 @@ void iterate(KlassInfoClosure* cic); bool allocation_failed() { return _buckets == NULL; } size_t size_of_instances_in_words() const; + bool merge(KlassInfoTable* table); + bool merge_entry(const KlassInfoEntry* cie); friend class KlassInfoHisto; friend class KlassHierarchy; @@ -211,11 +216,47 @@ class HeapInspection : public StackObj { public: - void heap_inspection(outputStream* st) NOT_SERVICES_RETURN; - size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL) NOT_SERVICES_RETURN_(0); + void heap_inspection(outputStream* st, size_t parallel_thread_num = 1) NOT_SERVICES_RETURN; + size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL, size_t parallel_thread_num = 1) NOT_SERVICES_RETURN_(0); static void find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) NOT_SERVICES_RETURN; private: void iterate_over_heap(KlassInfoTable* cit, BoolObjectClosure* filter = NULL); }; +// Task for parallel heap inspection. The parallel inspection can fail +// because of a native OOM when allocating memory for the thread-local KlassInfoTable; +// in that case _success is set to false so the serial inspection can be tried instead. +// See the work() implementation in heapInspection.cpp for more info. +class ParHeapInspectTask : public AbstractGangTask { + private: + ParallelObjectIterator* _poi; + KlassInfoTable* _shared_cit; + BoolObjectClosure* _filter; + size_t _shared_missed_count; + bool _success; + Mutex _mutex; + + public: + ParHeapInspectTask(ParallelObjectIterator* poi, + KlassInfoTable* shared_cit, + BoolObjectClosure* filter) : + AbstractGangTask("Iterating heap"), + _poi(poi), + _shared_cit(shared_cit), + _filter(filter), + _shared_missed_count(0), + _success(true), + _mutex(Mutex::leaf, "Parallel heap iteration data merge lock") {} + + size_t missed_count() const { + return _shared_missed_count; + } + + bool success() { + return _success; + } + + virtual void work(uint worker_id); +}; + #endif // SHARE_MEMORY_HEAPINSPECTION_HPP --- old/src/hotspot/share/runtime/arguments.hpp 2020-04-25 20:53:38.767318229 +0800 +++ new/src/hotspot/share/runtime/arguments.hpp 2020-04-25 20:53:38.587318779 +0800 @@ -441,12 +441,6 @@ static ArgsRange check_memory_size(julong size, julong min_size, julong max_size); static ArgsRange parse_memory_size(const char* s, julong* long_arg, julong min_size, julong max_size = max_uintx); - // Parse a string for a unsigned integer.
Returns true if value - is an unsigned integer greater than or equal to the minimum - parameter passed and returns the value in uintx_arg. Returns - false otherwise, with uintx_arg undefined. - static bool parse_uintx(const char* value, uintx* uintx_arg, - uintx min_size); // methods to build strings from individual args static void build_jvm_args(const char* arg); @@ -490,6 +484,12 @@ public: // Parses the arguments, first phase static jint parse(const JavaVMInitArgs* args); + // Parse a string for an unsigned integer. Returns true if value + // is an unsigned integer greater than or equal to the minimum + // parameter passed and returns the value in uintx_arg. Returns + // false otherwise, with uintx_arg undefined. + static bool parse_uintx(const char* value, uintx* uintx_arg, + uintx min_size); // Apply ergonomics static jint apply_ergo(); // Adjusts the arguments after the OS have adjusted the arguments --- old/src/hotspot/share/services/attachListener.cpp 2020-04-25 20:53:39.391316321 +0800 +++ new/src/hotspot/share/services/attachListener.cpp 2020-04-25 20:53:39.219316846 +0800 @@ -242,45 +242,100 @@ return JNI_OK; } +// Valid arguments: +// "-live" or "-all" +// "parallel=<N>" +// "<file path>" +static jint process_heap_inspect_options(const char* argline, + outputStream* out, + HeapInspectArgs* args) { + char* save_ptr; + char* buf = NEW_C_HEAP_ARRAY(char, strlen(argline)+1, mtInternal); + if (buf == NULL) { + return JNI_ERR; + } + snprintf(buf, strlen(argline)+1, "%s", argline); + char* arg = strtok_r(buf, ",", &save_ptr); + while (arg != NULL) { + // "-live" or "-all" + if (strcmp(arg, "-live") == 0) { + args->_live_object_only = true; + } else if (strcmp(arg, "-all") == 0) { + args->_live_object_only = false; + } else if (strncmp(arg, "parallel=", 9) == 0) { + char* num_str = &arg[9]; + uintx num = 0; + if (!Arguments::parse_uintx(num_str, &num, 0)) { + out->print_cr("Invalid parallel thread number"); + return JNI_ERR; + } + args->_parallel_thread_num = num; + } else { + // must be a file path + assert(args->_path == NULL, "Must be"); + char* path = args->_path = NEW_C_HEAP_ARRAY(char, strlen(arg)+1, mtInternal); + if (path == NULL) { + out->print_cr("Out of internal memory."); + return JNI_ERR; + } + snprintf(path, strlen(arg)+1, "%s", arg); + if (path[0] == '\0') { + out->print_cr("No dump file specified."); + } else { + fileStream* fs = new (ResourceObj::C_HEAP, mtInternal) fileStream(path); + if (fs == NULL) { + out->print_cr("Failed to allocate filestream for file: %s", path); + return JNI_ERR; + } + args->_fs = fs; + } + } + arg = strtok_r(NULL, ",", &save_ptr); + } + FREE_C_HEAP_ARRAY(char, buf); + return JNI_OK; +} + +// Parse command options +static jint parse_cmd_options(const char* cmd, const char* argline, + outputStream* out, void* args) { + assert(argline != NULL, "Must be"); + if (strcmp(cmd, "heap_inspection") == 0) { + HeapInspectArgs* insp_opts = (HeapInspectArgs*)args; + return process_heap_inspect_options(argline, out, insp_opts); + } + // Command does not match + return JNI_ERR; +} + // Implementation of "inspectheap" command // See also: ClassHistogramDCmd class // // Input arguments :- -// arg0: "-live" or "-all" -// arg1: Name of the dump file or NULL +// arg0: all options in a single comma-separated string static jint heap_inspection(AttachOperation* op, outputStream* out) { bool live_objects_only = true; // default is true to retain the behavior before this change is made outputStream* os = out; // if path not specified or path is NULL, use out - fileStream* fs = NULL; const char*
arg0 = op->arg(0); - if (arg0 != NULL && (strlen(arg0) > 0)) { - if (strcmp(arg0, "-all") != 0 && strcmp(arg0, "-live") != 0) { - out->print_cr("Invalid argument to inspectheap operation: %s", arg0); + size_t parallel_thread_num = os::processor_count() * 3 / 8; // default is less than half of processors. + HeapInspectArgs args; + // Parse arguments + if (arg0 != NULL) { + if (JNI_ERR == parse_cmd_options("heap_inspection", arg0, out, (void*)(&args))) { return JNI_ERR; } - live_objects_only = strcmp(arg0, "-live") == 0; - } - - const char* path = op->arg(1); - if (path != NULL) { - if (path[0] == '\0') { - out->print_cr("No dump file specified"); - } else { - // create file - fs = new (ResourceObj::C_HEAP, mtInternal) fileStream(path); - if (fs == NULL) { - out->print_cr("Failed to allocate space for file: %s", path); - return JNI_ERR; - } - os = fs; + live_objects_only = args._live_object_only; + os = args._fs == NULL ? out : args._fs; + parallel_thread_num = args._parallel_thread_num == 0 ? parallel_thread_num : args._parallel_thread_num; + if (parallel_thread_num == 0) { + parallel_thread_num = 1; + } } - VM_GC_HeapInspection heapop(os, live_objects_only /* request full gc */); + VM_GC_HeapInspection heapop(os, live_objects_only /* request full gc */, parallel_thread_num); VMThread::execute(&heapop); - if (os != NULL && os != out) { - out->print_cr("Heap inspection file created: %s", path); - delete fs; + if (args._path != NULL) { + out->print_cr("Heap inspection file created: %s", args._path); } return JNI_OK; } --- old/src/hotspot/share/services/attachListener.hpp 2020-04-25 20:53:39.947314621 +0800 +++ new/src/hotspot/share/services/attachListener.hpp 2020-04-25 20:53:39.779315134 +0800 @@ -190,6 +190,35 @@ // complete operation by sending result code and any result data to the client virtual void complete(jint result, bufferedStream* result_stream) = 0; }; + +// Base class for attach command argument parsing. +class CommandArgs : public CHeapObj<mtInternal> { +}; + +// Arguments of the heap inspection command. +struct HeapInspectArgs : public CommandArgs { + bool _live_object_only; + size_t _parallel_thread_num; + fileStream* _fs; + char* _path; + + HeapInspectArgs() : _live_object_only(false), + _parallel_thread_num(0), + _fs(NULL), + _path(NULL) { } + ~HeapInspectArgs() { + if (_path != NULL) { + FREE_C_HEAP_ARRAY(char, _path); + _path = NULL; + } + + if (_fs != NULL) { + delete _fs; + _fs = NULL; + } + } +}; + #endif // INCLUDE_SERVICES #endif // SHARE_SERVICES_ATTACHLISTENER_HPP --- old/src/jdk.jcmd/share/classes/sun/tools/jmap/JMap.java 2020-04-25 20:53:40.523312858 +0800 +++ new/src/jdk.jcmd/share/classes/sun/tools/jmap/JMap.java 2020-04-25 20:53:40.343313409 +0800 @@ -164,33 +164,57 @@ return null; } + private static String add_option(String cmd, String opt) { + if (cmd.isEmpty()) { + return opt; + } + return cmd + "," + opt; + } + private static void histo(String pid, String options) throws AttachNotSupportedException, IOException, UnsupportedEncodingException { String liveopt = "-all"; String filename = null; + String parallel = null; String subopts[] = options.split(","); + boolean set_all = false; + boolean set_live = false; + String cmdline = ""; for (int i = 0; i < subopts.length; i++) { String subopt = subopts[i]; if (subopt.equals("") || subopt.equals("all")) { - // pass + cmdline = add_option(cmdline, "-all"); + set_all = true; } else if (subopt.equals("live")) { - liveopt = "-live"; + // Add the '-' prefix expected by the attach "inspectheap" command.
+ cmdline = add_option(cmdline, "-live"); + set_live = true; } else if (subopt.startsWith("file=")) { filename = parseFileName(subopt); if (filename == null) { usage(1); // invalid options or no filename } + cmdline = add_option(cmdline, filename); + } else if (subopt.startsWith("parallel=")) { + parallel = subopt.substring("parallel=".length()); + if (parallel == null) { + usage(1); + } + // Pass the "parallel=<n>" option through; the target VM validates the value. + cmdline = add_option(cmdline, subopt); } else { usage(1); } } + if (set_live && set_all) { + usage(1); + } System.out.flush(); - // inspectHeap is not the same as jcmd GC.class_histogram - executeCommandForPid(pid, "inspectheap", liveopt, filename); + executeCommandForPid(pid, "inspectheap", cmdline); } private static void dump(String pid, String options) @@ -287,6 +311,10 @@ System.err.println(" live count only live objects"); System.err.println(" all count all objects in the heap (default if one of \"live\" or \"all\" is not specified)"); System.err.println(" file=<file> dump data to <file>"); + System.err.println(" parallel=<N> number of parallel threads for heap iteration:"); + System.err.println(" parallel=0 default behavior, use predefined number of threads"); + System.err.println(" parallel=1 disable parallel heap iteration"); + System.err.println(" parallel=<N> use N threads for parallel heap iteration"); System.err.println(""); System.err.println(" Example: jmap -histo:live,file=/tmp/histo.data <pid>"); System.exit(exit); --- old/test/jdk/sun/tools/jmap/BasicJMapTest.java 2020-04-25 20:53:41.119311035 +0800 +++ new/test/jdk/sun/tools/jmap/BasicJMapTest.java 2020-04-25 20:53:40.943311574 +0800 @@ -78,6 +78,21 @@ output.shouldHaveExitValue(0); } + private static void testHistoParallelZero() throws Exception { + OutputAnalyzer output = jmap("-histo:parallel=0"); + output.shouldHaveExitValue(0); + } + + private static void testHistoParallel() throws Exception { + OutputAnalyzer output = jmap("-histo:parallel=2"); + output.shouldHaveExitValue(0); + } + + private static void testHistoNonParallel() throws Exception { + OutputAnalyzer output = jmap("-histo:parallel=1"); + output.shouldHaveExitValue(0); + } + private static void testHistoToFile() throws Exception { histoToFile(false); } @@ -91,6 +106,18 @@ histoToFile(false, explicitAll); } + private static void testHistoFileParallelZero() throws Exception { + histoToFile(false, false, 0); + } + + private static void testHistoFileParallel() throws Exception { + histoToFile(false, false, 2); + } + + private static void testHistoFileNonParallel() throws Exception { + histoToFile(false, false, 1); + } + private static void histoToFile(boolean live) throws Exception { boolean explicitAll = false; histoToFile(live, explicitAll); @@ -114,6 +141,43 @@ } output.shouldHaveExitValue(0); output.shouldContain("Heap inspection file created"); + file.delete(); + } + + private static void histoToFile(boolean live, + boolean explicitAll, + int parallelThreadNum) throws Exception { + String liveArg = ""; + String fileArg = ""; + String parArg = "parallel=" + parallelThreadNum; + String allArgs = "-histo:"; + + if (live == true && explicitAll == true) { + fail("Illegal argument setting for jmap -histo"); + } + if (live == true) { + liveArg = "live,"; + } else if (explicitAll) { + liveArg = "all,"; + } + + File file = new File("jmap.histo.file" + System.currentTimeMillis() + ".histo"); + if (file.exists()) { + file.delete(); + } + fileArg = "file=" + file.getName(); + + OutputAnalyzer output; + allArgs = allArgs + liveArg + fileArg + "," + parArg +
""; + if (live) { + output = jmap(allArgs); + } else if (explicitAll == true) { + output = jmap(allArgs); + } else { + output = jmap(allArgs); + } + output.shouldHaveExitValue(0); + output.shouldContain("Heap inspection file created"); file.delete(); }