--- old/src/hotspot/share/gc/shared/collectedHeap.cpp	2018-03-08 15:53:21.183623606 -0800
+++ new/src/hotspot/share/gc/shared/collectedHeap.cpp	2018-03-08 15:53:20.915624513 -0800
@@ -346,7 +346,7 @@
   // requested.
   if (HeapMonitoring::enabled()) {
     size_t tlab_bytes_since_last_sample = thread->tlab().bytes_since_last_sample_point();
-    thread->check_for_sampling(obj, size, tlab_bytes_since_last_sample);
+    thread->heap_sampler().check_for_sampling(obj, size, tlab_bytes_since_last_sample);
   }
   thread->tlab().fill(obj, obj + size, new_tlab_size);
--- old/src/hotspot/share/gc/shared/collectedHeap.inline.hpp	2018-03-08 15:53:22.067620614 -0800
+++ new/src/hotspot/share/gc/shared/collectedHeap.inline.hpp	2018-03-08 15:53:21.815621467 -0800
@@ -161,7 +161,7 @@
   AllocTracer::send_allocation_outside_tlab(klass, result, size_in_bytes, THREAD);
   if (HeapMonitoring::enabled()) {
-    THREAD->check_for_sampling(result, size_in_bytes);
+    THREAD->heap_sampler().check_for_sampling(result, size_in_bytes);
   }
   return result;
 }
--- old/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp	2018-03-08 15:53:22.939617664 -0800
+++ new/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp	2018-03-08 15:53:22.675618558 -0800
@@ -52,8 +52,6 @@
     return 0;
   }
-  // TODO: To be deprecated when FastTLABRefill is deprecated.
-  update_end_pointers();
   return pointer_delta(reserved_end(), top());
 }
@@ -126,8 +124,6 @@
     myThread()->incr_allocated_bytes(used_bytes());
   }
-  // TODO: To be deprecated when FastTLABRefill is deprecated.
-  update_end_pointers();
   CollectedHeap::fill_with_object(top(), reserved_end(), retire && zap);
   if (retire || ZeroTLAB) {  // "Reset" the TLAB
@@ -136,12 +132,11 @@
     set_pf_top(NULL);
     set_current_end(NULL);
     set_allocation_end(NULL);
-    set_last_slow_path_end(NULL);
   }
 }
 assert(!(retire || ZeroTLAB) ||
        (start() == NULL && current_end() == NULL && top() == NULL &&
-        _allocation_end == NULL && _last_slow_path_end == NULL),
+        _allocation_end == NULL),
        "TLAB must be reset");
 }
@@ -206,7 +201,6 @@
   set_pf_top(top);
   set_current_end(end);
   set_allocation_end(end);
-  set_last_slow_path_end(end);
   invariants();
 }
@@ -331,13 +325,12 @@
 void ThreadLocalAllocBuffer::set_sample_end() {
   size_t heap_words_remaining = pointer_delta(_current_end, _top);
-  size_t bytes_until_sample = myThread()->bytes_until_sample();
+  size_t bytes_until_sample = myThread()->heap_sampler().bytes_until_sample();
   size_t words_until_sample = bytes_until_sample / HeapWordSize;
   if (heap_words_remaining > words_until_sample) {
     HeapWord* new_end = _top + words_until_sample;
     set_current_end(new_end);
-    set_last_slow_path_end(new_end);
     _bytes_since_last_sample_point = bytes_until_sample;
   } else {
     _bytes_since_last_sample_point = heap_words_remaining * HeapWordSize;
@@ -351,27 +344,16 @@
 }
 void ThreadLocalAllocBuffer::set_back_allocation_end() {
-  update_end_pointers();
   _current_end = _allocation_end;
 }
-void ThreadLocalAllocBuffer::update_end_pointers() {
-  // Did a fast TLAB refill occur? (This will be deprecated when fast TLAB
-  // refill disappears).
-  if (_last_slow_path_end != _current_end) {
-    // Fix up the last slow path end to be now the end of this TLAB.
- _last_slow_path_end = _current_end; - _allocation_end = _current_end; - } -} - HeapWord* ThreadLocalAllocBuffer::allocate_sampled_object(size_t size) { Thread* thread = myThread(); thread->tlab().set_back_allocation_end(); HeapWord* result = thread->tlab().allocate(size); if (result) { - thread->check_for_sampling(result, size * HeapWordSize, _bytes_since_last_sample_point); + thread->heap_sampler().check_for_sampling(result, size * HeapWordSize, _bytes_since_last_sample_point); thread->tlab().set_sample_end(); } @@ -379,8 +361,6 @@ } HeapWord* ThreadLocalAllocBuffer::reserved_end() { - assert (_last_slow_path_end == _current_end, - "Have to call update_end_pointers before reserved_end."); return _allocation_end + alignment_reserve(); } --- old/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp 2018-03-08 15:53:23.843614606 -0800 +++ new/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp 2018-03-08 15:53:23.587615472 -0800 @@ -38,12 +38,11 @@ // time across multiple threads. The park()/unpark() pair is // used to make it available for such multiplexing. // -// Heap sampling is performed via the end/actual_end fields. -// actual_end contains the real end of the tlab allocation, -// whereas end can be set to an arbitrary spot in the tlab to +// Heap sampling is performed via the current_end/allocation_end +// fields. +// allocation_end contains the real end of the tlab allocation, +// whereas current_end can be set to an arbitrary spot in the tlab to // trip the return and sample the allocation. -// slow_path_end is used to track if a fast tlab refill occured -// between slowpath calls. class ThreadLocalAllocBuffer: public CHeapObj { friend class VMStructs; friend class JVMCIVMStructs; @@ -53,7 +52,6 @@ HeapWord* _pf_top; // allocation prefetch watermark HeapWord* _current_end; // allocation end (can be the sampling end point or _allocation_end) HeapWord* _allocation_end; // end for allocations (actual TLAB end, excluding alignment_reserve) - HeapWord* _last_slow_path_end; // last address for slow_path_end (as opposed to _allocation_end) size_t _desired_size; // desired size (including alignment_reserve) size_t _refill_waste_limit; // hold onto tlab if free() is larger than this @@ -78,7 +76,6 @@ void set_start(HeapWord* start) { _start = start; } void set_current_end(HeapWord* current_end) { _current_end = current_end; } void set_allocation_end(HeapWord* ptr) { _allocation_end = ptr; } - void set_last_slow_path_end(HeapWord* ptr) { _last_slow_path_end = ptr; } void set_top(HeapWord* top) { _top = top; } void set_pf_top(HeapWord* pf_top) { _pf_top = pf_top; } void set_desired_size(size_t desired_size) { _desired_size = desired_size; } --- old/src/hotspot/share/opto/runtime.cpp 2018-03-08 15:53:24.755611521 -0800 +++ new/src/hotspot/share/opto/runtime.cpp 2018-03-08 15:53:24.491612414 -0800 @@ -217,6 +217,7 @@ #ifndef PRODUCT SharedRuntime::_new_instance_ctr++; // new instance requires GC #endif + JvmtiSampledObjectAllocEventCollector sampled_collector; assert(check_compiled_frame(thread), "incorrect caller"); // These checks are cheap to make and support reflective allocation. @@ -258,6 +259,7 @@ SharedRuntime::_new_array_ctr++; // new array requires GC #endif assert(check_compiled_frame(thread), "incorrect caller"); + JvmtiSampledObjectAllocEventCollector sampled_collector; // Scavenge and allocate an instance. 
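As an aside before the runtime allocation entry points continue: the current_end/allocation_end comment in threadLocalAllocBuffer.hpp above is the crux of the TLAB changes, and a small illustrative sketch may make it concrete. This is hypothetical C++ with stand-in names, not HotSpot code:

#include <cstddef>

// Sketch of the end-pointer trick described above: current_end is the limit
// the fast path checks, allocation_end is the real end of the buffer.
struct TlabSketch {
  char* top;            // bump pointer
  char* current_end;    // fast-path limit; may be a sample point
  char* allocation_end; // real end of the TLAB

  // Fast path: fails once top would cross current_end. Placing current_end
  // before allocation_end therefore "trips the return" and forces a
  // slow-path call where the sampler can run.
  char* allocate(std::size_t size) {
    if (top + size > current_end) return nullptr; // take the slow path
    char* result = top;
    top += size;
    return result;
  }

  // Analogue of set_sample_end(): move current_end to the next sample
  // point, clamped to the real end of the buffer.
  void set_sample_end(std::size_t bytes_until_sample) {
    std::size_t remaining = static_cast<std::size_t>(allocation_end - top);
    current_end = top + (bytes_until_sample < remaining ? bytes_until_sample
                                                        : remaining);
  }
};

The real code additionally accounts for the alignment reserve and tracks _bytes_since_last_sample_point; the sketch only shows the pointer game.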
oop result;
@@ -297,6 +299,7 @@
   SharedRuntime::_new_array_ctr++;            // new array requires GC
 #endif
   assert(check_compiled_frame(thread), "incorrect caller");
+  JvmtiSampledObjectAllocEventCollector sampled_collector;
   // Scavenge and allocate an instance.
   oop result;
@@ -345,6 +348,7 @@
 #ifndef PRODUCT
   SharedRuntime::_multi2_ctr++;                // multianewarray for 1 dimension
 #endif
+  JvmtiSampledObjectAllocEventCollector sampled_collector;
   assert(check_compiled_frame(thread), "incorrect caller");
   assert(elem_type->is_klass(), "not a class");
   jint dims[2];
@@ -361,6 +365,7 @@
 #ifndef PRODUCT
   SharedRuntime::_multi3_ctr++;                // multianewarray for 1 dimension
 #endif
+  JvmtiSampledObjectAllocEventCollector sampled_collector;
   assert(check_compiled_frame(thread), "incorrect caller");
   assert(elem_type->is_klass(), "not a class");
   jint dims[3];
@@ -378,6 +383,7 @@
 #ifndef PRODUCT
   SharedRuntime::_multi4_ctr++;                // multianewarray for 1 dimension
 #endif
+  JvmtiSampledObjectAllocEventCollector sampled_collector;
   assert(check_compiled_frame(thread), "incorrect caller");
   assert(elem_type->is_klass(), "not a class");
   jint dims[4];
@@ -396,6 +402,7 @@
 #ifndef PRODUCT
   SharedRuntime::_multi5_ctr++;                // multianewarray for 1 dimension
 #endif
+  JvmtiSampledObjectAllocEventCollector sampled_collector;
   assert(check_compiled_frame(thread), "incorrect caller");
   assert(elem_type->is_klass(), "not a class");
   jint dims[5];
@@ -414,6 +421,7 @@
   assert(check_compiled_frame(thread), "incorrect caller");
   assert(elem_type->is_klass(), "not a class");
   assert(oop(dims)->is_typeArray(), "not an array");
+  JvmtiSampledObjectAllocEventCollector sampled_collector;
   ResourceMark rm;
   jint len = dims->length();
--- old/src/hotspot/share/prims/jvmti.xml	2018-03-08 15:53:25.647608504 -0800
+++ new/src/hotspot/share/prims/jvmti.xml	2018-03-08 15:53:25.379609410 -0800
@@ -10353,13 +10353,19 @@
      See .
-
      Can sample the heap.
      If this capability is enabled then the heap sampling methods can be called.
+
+
+      Can generate sampled object allocation events.
+      If this capability is enabled then an event for a sampled object allocation is sent
+      when the heap sampler is also enabled.
+
+
@@ -11601,7 +11607,7 @@
    Start Heap Sampling
-      Start the heap sampler in the JVM. The function provides, via its argument, the sampling
+      Start the heap sampler in the JVM. The function provides, via its arguments, the sampling
      rate requested and will fill internal data structures with heap allocation samples. The
      samples are obtained via the , ,
@@ -11616,17 +11622,21 @@
-      The monitoring rate used for sampling. The sampler will use a statistical approach to
+      The monitoring rate in bytes used for sampling. The sampler will use a statistical approach to
      provide, on average, one sample for every  allocated bytes.
-      Note: a low monitoring rate will incur a higher overhead, therefore, the sampler should
-      only be used when knowing it may impact performance.
+      Note: a low monitoring rate, such as sampling every 1024 bytes, will probably incur high overhead.
+      Because of that overhead, a low rate should only be used with the understanding that it may impact
+      performance. On the other hand, sampling every 1024kB is far less likely to incur high overhead,
+      since it samples 1024 times less often than 1024-byte sampling.
-      The maximum storage used for the GC samples in the sampler. By default, the value is 200.
+      The sampler keeps in memory a maximum number of garbage collected traces. This parameter sets the maximum number
+      to be preserved by the system.
+      The bigger the number, the more traces are preserved at a given time, and the more
+      memory the system uses to hold them.
@@ -11647,12 +11657,12 @@
      Any sample obtained during sampling is still available via the
      , ,
-      ,
+      ,
      or  functions.
      Stopping the heap sampler resets internal traces and counters.
      Therefore stopping the sampler frees any internal trace samples; any subsequent call to the
      ,
-      , ,
+      , ,
      or  functions will return no traces.
    new
@@ -11668,10 +11678,11 @@
    Get Live Object Allocation Traces
-      Get Live Object Heap Sampled traces. The fields of the
+      Get the live object heap sampled traces. The fields of the
      structure are filled in with details of the specified sampled allocation.
-      This method returns traces that either have survived a GC or have not yet been collected.
+      Live objects are defined in this case as objects that either have survived the latest GC
+      or have not yet been collected by a GC.
      This method can be called at any time but if the sampler is not enabled, via
      , it returns no traces.
@@ -11691,9 +11702,9 @@
      by .

Note that this buffer is allocated to include the - buffers pointed to by , which also + buffers pointed to by , which also include the buffers pointed by - . + . All these buffers must not be separately deallocated. @@ -11712,9 +11723,13 @@ Get Garbage Traces - Get the recent garbage heap sampled traces. The fields of the + Get the recent garbage heap sampled traces. The fields of the structure are filled in with details of the specified sampled allocation. + The garbage object traces returned by this method are the most recent garbage collected traces. Up to + from the call to + will be provided. + This method can be called at any time but if the sampler is not enabled, via , it returns no traces. @@ -11733,9 +11748,9 @@ by .

Note that this buffer is allocated to include the
-      buffers pointed to by , which also
      include the buffers pointed by
-      .
+      .
      All these buffers must not be separately deallocated.
@@ -11754,9 +11769,14 @@
    Get Frequent Garbage Traces
-      Get the frequent garbage heap sampled traces. The fields of the
+      Get the frequent garbage heap sampled traces. The fields of the
      structure are filled in with details of the specified sampled allocation.
+      The garbage object traces returned by this method are the most frequently seen garbage collected
+      traces. Frequency is determined by admitting fewer and fewer traces into the frequent cache, which
+      statistically keeps only frequently allocated objects cached and returned to the user. Up to
+       from the call to  will be provided.
+
      This method can be called at any time but if the sampler is not enabled, via
      , it returns no traces.
@@ -11775,9 +11795,9 @@
      by .

Note that this buffer is allocated to include the
-      buffers pointed to by , which also
      include the buffers pointed by
-      .
+      .
      All these buffers must not be separately deallocated.
@@ -11797,9 +11817,13 @@
    Get Cached Object Allocated Traces
      Get the cached sampled traces: the traces are the ones that were collected during the last
-      full GC. The fields of the  structure are filled in with
+      full GC. The fields of the  structure are filled in with
      details of the specified sampled allocation.
+      Cached object traces are the traces of objects that were still live after the last full GC. Unlike
+      , this method does not return objects
+      that have not survived at least one GC.
+
      This method can be called at any time but if the sampler is not enabled, via
      , it returns no traces.
@@ -11818,7 +11842,7 @@
      by .

Note that this buffer is allocated to include the - buffers pointed to by , which also + buffers pointed to by , which also include the buffers pointed by . @@ -11837,9 +11861,9 @@ - Get the heap sampling statistics + Get Heap Sampling Statistics - Returns a to understand the heap sampling behavior and current + Fills a to understand the heap sampling behavior and current internal data storage status. This method can be called at any time but if the sampler has not been started via at least @@ -13847,8 +13871,56 @@ + + + Sent when an object is sampled via the + Heap Sampling Monitoring system . + The event is sent once the allocation has been done and provides the object, stack trace + for the allocation, the thread allocating, the size of allocation, and class. + + new + + + + + + + JNIEnv + + + The JNI environment of the event (current) thread. + + + + + + Thread allocating the object. + + + + + + JNI local reference to the object that was allocated. + + + + + + JNI local reference to the class of the object + + + + + + Size of the object (in bytes). See . + + + + + + id="ObjectFree" const="JVMTI_EVENT_OBJECT_FREE" num="83"> An Object Free event is sent when the garbage collector frees an object. Events are only sent for tagged objects--see @@ -13865,10 +13937,10 @@ - - - The freed object's tag - + + + The freed object's tag + --- old/src/hotspot/share/prims/jvmtiEventController.cpp 2018-03-08 15:53:26.819604539 -0800 +++ new/src/hotspot/share/prims/jvmtiEventController.cpp 2018-03-08 15:53:26.551605446 -0800 @@ -84,6 +84,7 @@ static const jlong OBJECT_FREE_BIT = (((jlong)1) << (JVMTI_EVENT_OBJECT_FREE - TOTAL_MIN_EVENT_TYPE_VAL)); static const jlong RESOURCE_EXHAUSTED_BIT = (((jlong)1) << (JVMTI_EVENT_RESOURCE_EXHAUSTED - TOTAL_MIN_EVENT_TYPE_VAL)); static const jlong VM_OBJECT_ALLOC_BIT = (((jlong)1) << (JVMTI_EVENT_VM_OBJECT_ALLOC - TOTAL_MIN_EVENT_TYPE_VAL)); +static const jlong SAMPLED_OBJECT_ALLOC_BIT = (((jlong)1) << (JVMTI_EVENT_SAMPLED_OBJECT_ALLOC - TOTAL_MIN_EVENT_TYPE_VAL)); // bits for extension events static const jlong CLASS_UNLOAD_BIT = (((jlong)1) << (EXT_EVENT_CLASS_UNLOAD - TOTAL_MIN_EVENT_TYPE_VAL)); @@ -615,6 +616,7 @@ JvmtiExport::set_should_post_compiled_method_load((any_env_thread_enabled & COMPILED_METHOD_LOAD_BIT) != 0); JvmtiExport::set_should_post_compiled_method_unload((any_env_thread_enabled & COMPILED_METHOD_UNLOAD_BIT) != 0); JvmtiExport::set_should_post_vm_object_alloc((any_env_thread_enabled & VM_OBJECT_ALLOC_BIT) != 0); + JvmtiExport::set_should_post_sampled_object_alloc((any_env_thread_enabled & SAMPLED_OBJECT_ALLOC_BIT) != 0); // need this if we want thread events or we need them to init data JvmtiExport::set_should_post_thread_life((any_env_thread_enabled & NEED_THREAD_LIFE_EVENTS) != 0); --- old/src/hotspot/share/prims/jvmtiExport.cpp 2018-03-08 15:53:27.703601550 -0800 +++ new/src/hotspot/share/prims/jvmtiExport.cpp 2018-03-08 15:53:27.471602335 -0800 @@ -1031,12 +1031,12 @@ return k; } -class JvmtiVMObjectAllocEventMark : public JvmtiClassEventMark { +class JvmtiObjectAllocEventMark : public JvmtiClassEventMark { private: jobject _jobj; jlong _size; public: - JvmtiVMObjectAllocEventMark(JavaThread *thread, oop obj) : JvmtiClassEventMark(thread, oop_to_klass(obj)) { + JvmtiObjectAllocEventMark(JavaThread *thread, oop obj) : JvmtiClassEventMark(thread, oop_to_klass(obj)) { _jobj = (jobject)to_jobject(obj); _size = obj->size() * wordSize; }; @@ -1201,6 +1201,7 @@ bool JvmtiExport::_should_post_object_free = false; bool 
JvmtiExport::_should_post_resource_exhausted = false; bool JvmtiExport::_should_post_vm_object_alloc = false; +bool JvmtiExport::_should_post_sampled_object_alloc = false; bool JvmtiExport::_should_post_on_exceptions = false; //////////////////////////////////////////////////////////////////////////////////////////////// @@ -2298,6 +2299,26 @@ } } +// Collect all the vm internally allocated objects which are visible to java world +void JvmtiExport::record_sampled_internal_object_allocation(oop obj) { + Thread* thread = Thread::current_or_null(); + if (thread != NULL && thread->is_Java_thread()) { + // Can not take safepoint here. + NoSafepointVerifier no_sfpt; + // Can not take safepoint here so can not use state_for to get + // jvmti thread state. + JvmtiThreadState *state = ((JavaThread*)thread)->jvmti_thread_state(); + if (state != NULL ) { + // state is non NULL when SampledObjectAllocEventCollector is enabled. + JvmtiSampledObjectAllocEventCollector *collector; + collector = state->get_sampled_object_alloc_event_collector(); + if (collector != NULL && collector->is_enabled()) { + collector->record_allocation(obj); + } + } + } +} + void JvmtiExport::post_garbage_collection_finish() { Thread *thread = Thread::current(); // this event is posted from VM-Thread. EVT_TRIG_TRACE(JVMTI_EVENT_GARBAGE_COLLECTION_FINISH, @@ -2499,7 +2520,6 @@ } } - void JvmtiExport::post_vm_object_alloc(JavaThread *thread, oop object) { EVT_TRIG_TRACE(JVMTI_EVENT_VM_OBJECT_ALLOC, ("[%s] Trg vm object alloc triggered", JvmtiTrace::safe_get_thread_name(thread))); @@ -2515,7 +2535,7 @@ JvmtiTrace::safe_get_thread_name(thread), object==NULL? "NULL" : object->klass()->external_name())); - JvmtiVMObjectAllocEventMark jem(thread, h()); + JvmtiObjectAllocEventMark jem(thread, h()); JvmtiJavaThreadEventTransition jet(thread); jvmtiEventVMObjectAlloc callback = env->callbacks()->VMObjectAlloc; if (callback != NULL) { @@ -2526,6 +2546,37 @@ } } +void JvmtiExport::post_sampled_object_alloc(JavaThread *thread, oop object) { + EVT_TRIG_TRACE(JVMTI_EVENT_SAMPLED_OBJECT_ALLOC, + ("[%s] Trg sampled object alloc triggered", + JvmtiTrace::safe_get_thread_name(thread))); + + if (object == NULL) { + return; + } + + HandleMark hm(thread); + Handle h(thread, object); + + JvmtiEnvIterator it; + for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) { + if (env->is_enabled(JVMTI_EVENT_SAMPLED_OBJECT_ALLOC)) { + EVT_TRACE(JVMTI_EVENT_SAMPLED_OBJECT_ALLOC, + ("[%s] Evt sampled object alloc sent %s", + JvmtiTrace::safe_get_thread_name(thread), + object == NULL ? 
"NULL" : object->klass()->external_name())); + + JvmtiObjectAllocEventMark jem(thread, h()); + JvmtiJavaThreadEventTransition jet(thread); + jvmtiEventSampledObjectAlloc callback = env->callbacks()->SampledObjectAlloc; + if (callback != NULL) { + (*callback)(env->jvmti_external(), jem.jni_env(), jem.jni_thread(), + jem.jni_jobject(), jem.jni_class(), jem.size()); + } + } + } +} + //////////////////////////////////////////////////////////////////////////////////////////////// void JvmtiExport::cleanup_thread(JavaThread* thread) { @@ -2551,7 +2602,7 @@ void JvmtiExport::oops_do(OopClosure* f) { JvmtiCurrentBreakpoints::oops_do(f); - JvmtiVMObjectAllocEventCollector::oops_do_for_all_threads(f); + JvmtiObjectAllocEventCollector::oops_do_for_all_threads(f); } void JvmtiExport::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) { @@ -2684,6 +2735,9 @@ } else if (is_dynamic_code_event()) { _prev = state->get_dynamic_code_event_collector(); state->set_dynamic_code_event_collector((JvmtiDynamicCodeEventCollector *)this); + } else if (is_sampled_object_alloc_event()) { + _prev = state->get_sampled_object_alloc_event_collector(); + state->set_sampled_object_alloc_event_collector((JvmtiSampledObjectAllocEventCollector*)this); } } @@ -2700,14 +2754,19 @@ // this thread's jvmti state was created during the scope of // the event collector. } - } else { - if (is_dynamic_code_event()) { - if (state->get_dynamic_code_event_collector() == this) { - state->set_dynamic_code_event_collector((JvmtiDynamicCodeEventCollector *)_prev); - } else { - // this thread's jvmti state was created during the scope of - // the event collector. - } + } else if (is_dynamic_code_event()) { + if (state->get_dynamic_code_event_collector() == this) { + state->set_dynamic_code_event_collector((JvmtiDynamicCodeEventCollector *)_prev); + } else { + // this thread's jvmti state was created during the scope of + // the event collector. + } + } else if (is_sampled_object_alloc_event()) { + if (state->get_sampled_object_alloc_event_collector() == this) { + state->set_sampled_object_alloc_event_collector((JvmtiSampledObjectAllocEventCollector*)_prev); + } else { + // this thread's jvmti state was created during the scope of + // the event collector. } } } @@ -2744,34 +2803,26 @@ _code_blobs->append(new JvmtiCodeBlobDesc(name, start, end)); } -// Setup current thread to record vm allocated objects. -JvmtiVMObjectAllocEventCollector::JvmtiVMObjectAllocEventCollector() : _allocated(NULL) { - if (JvmtiExport::should_post_vm_object_alloc()) { - _enable = true; - setup_jvmti_thread_state(); - } else { - _enable = false; - } +JvmtiObjectAllocEventCollector::JvmtiObjectAllocEventCollector() : + _allocated(NULL), _enable(false), _post_callback(NULL), + _callback_for_all_oops(false) { } -// Post vm_object_alloc event for vm allocated objects visible to java -// world. 
-JvmtiVMObjectAllocEventCollector::~JvmtiVMObjectAllocEventCollector() { +void JvmtiObjectAllocEventCollector::generate_call_for_allocated() { if (_allocated != NULL) { set_enabled(false); for (int i = 0; i < _allocated->length(); i++) { oop obj = _allocated->at(i); - if (ServiceUtil::visible_oop(obj)) { - JvmtiExport::post_vm_object_alloc(JavaThread::current(), obj); + if (_callback_for_all_oops || ServiceUtil::visible_oop(obj)) { + _post_callback(JavaThread::current(), obj); } } delete _allocated; } - unset_jvmti_thread_state(); } -void JvmtiVMObjectAllocEventCollector::record_allocation(oop obj) { - assert(is_enabled(), "VM object alloc event collector is not enabled"); +void JvmtiObjectAllocEventCollector::record_allocation(oop obj) { + assert(is_enabled(), "Object alloc event collector is not enabled"); if (_allocated == NULL) { _allocated = new (ResourceObj::C_HEAP, mtInternal) GrowableArray(1, true); } @@ -2779,7 +2830,7 @@ } // GC support. -void JvmtiVMObjectAllocEventCollector::oops_do(OopClosure* f) { +void JvmtiObjectAllocEventCollector::oops_do(OopClosure* f) { if (_allocated != NULL) { for(int i=_allocated->length() - 1; i >= 0; i--) { if (_allocated->at(i) != NULL) { @@ -2789,7 +2840,7 @@ } } -void JvmtiVMObjectAllocEventCollector::oops_do_for_all_threads(OopClosure* f) { +void JvmtiObjectAllocEventCollector::oops_do_for_all_threads(OopClosure* f) { // no-op if jvmti not enabled if (!JvmtiEnv::environments_might_exist()) { return; @@ -2798,11 +2849,17 @@ for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jthr = jtiwh.next(); ) { JvmtiThreadState *state = jthr->jvmti_thread_state(); if (state != NULL) { - JvmtiVMObjectAllocEventCollector *collector; + JvmtiObjectAllocEventCollector *collector; collector = state->get_vm_object_alloc_event_collector(); while (collector != NULL) { collector->oops_do(f); - collector = (JvmtiVMObjectAllocEventCollector *)collector->get_prev(); + collector = (JvmtiObjectAllocEventCollector*) collector->get_prev(); + } + + collector = state->get_sampled_object_alloc_event_collector(); + while (collector != NULL) { + collector->oops_do(f); + collector = (JvmtiObjectAllocEventCollector*) collector->get_prev(); } } } @@ -2837,6 +2894,36 @@ } }; +// Setup current thread to record vm allocated objects. +JvmtiVMObjectAllocEventCollector::JvmtiVMObjectAllocEventCollector() { + if (JvmtiExport::should_post_vm_object_alloc()) { + _enable = true; + _post_callback = JvmtiExport::post_vm_object_alloc; + setup_jvmti_thread_state(); + } +} + +JvmtiVMObjectAllocEventCollector::~JvmtiVMObjectAllocEventCollector() { + generate_call_for_allocated(); + unset_jvmti_thread_state(); +} + +// Setup current thread to record sampled allocated objects. +JvmtiSampledObjectAllocEventCollector::JvmtiSampledObjectAllocEventCollector() { + if (JvmtiExport::should_post_sampled_object_alloc()) { + _enable = true; + // TODO: switch it back.. 
+ _callback_for_all_oops = false; + _post_callback = JvmtiExport::post_sampled_object_alloc; + setup_jvmti_thread_state(); + } +} + +JvmtiSampledObjectAllocEventCollector::~JvmtiSampledObjectAllocEventCollector() { + generate_call_for_allocated(); + unset_jvmti_thread_state(); +} + JvmtiGCMarker::JvmtiGCMarker() { // if there aren't any JVMTI environments then nothing to do if (!JvmtiEnv::environments_might_exist()) { --- old/src/hotspot/share/prims/jvmtiExport.hpp 2018-03-08 15:53:28.639598385 -0800 +++ new/src/hotspot/share/prims/jvmtiExport.hpp 2018-03-08 15:53:28.375599278 -0800 @@ -123,6 +123,7 @@ // breakpoint info JVMTI_SUPPORT_FLAG(should_clean_up_heap_objects) JVMTI_SUPPORT_FLAG(should_post_vm_object_alloc) + JVMTI_SUPPORT_FLAG(should_post_sampled_object_alloc) // If flag cannot be implemented, give an error if on=true static void report_unsupported(bool on); @@ -376,6 +377,17 @@ record_vm_internal_object_allocation(object); } } + + static void record_sampled_internal_object_allocation(oop object) NOT_JVMTI_RETURN; + // Post objects collected by sampled_object_alloc_event_collector. + static void post_sampled_object_alloc(JavaThread *thread, oop object) NOT_JVMTI_RETURN; + // Collects vm internal objects for later event posting. + inline static void sampled_object_alloc_event_collector(oop object) { + if (should_post_sampled_object_alloc()) { + record_sampled_internal_object_allocation(object); + } + } + inline static void post_array_size_exhausted() { if (should_post_resource_exhausted()) { post_resource_exhausted(JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, @@ -437,10 +449,13 @@ JvmtiEventCollector* _prev; // Save previous one to support nested event collector. public: + JvmtiEventCollector() : _prev(NULL) {} + void setup_jvmti_thread_state(); // Set this collector in current thread. void unset_jvmti_thread_state(); // Reset previous collector in current thread. virtual bool is_dynamic_code_event() { return false; } virtual bool is_vm_object_alloc_event(){ return false; } + virtual bool is_sampled_object_alloc_event(){ return false; } JvmtiEventCollector *get_prev() { return _prev; } }; @@ -475,21 +490,18 @@ }; -// Used to record vm internally allocated object oops and post -// vm object alloc event for objects visible to java world. -// Constructor enables JvmtiThreadState flag and all vm allocated -// objects are recorded in a growable array. When destructor is -// called the vm object alloc event is posted for each objects -// visible to java world. -// See jvm.cpp file for its usage. +// Used as a base class for object allocation collection and then posting +// the allocations to any event notification callbacks. // -class JvmtiVMObjectAllocEventCollector : public JvmtiEventCollector { - private: +class JvmtiObjectAllocEventCollector : public JvmtiEventCollector { + protected: GrowableArray* _allocated; // field to record vm internally allocated object oop. bool _enable; // This flag is enabled in constructor and disabled // in destructor before posting event. To avoid // collection of objects allocated while running java code inside - // agent post_vm_object_alloc() event handler. + // agent post_X_object_alloc() event handler. + void (*_post_callback)(JavaThread*, oop); // what callback to use when destroying the collector. 
+ bool _callback_for_all_oops; //GC support void oops_do(OopClosure* f); @@ -502,15 +514,42 @@ static void oops_do_for_all_threads(OopClosure* f); public: - JvmtiVMObjectAllocEventCollector() NOT_JVMTI_RETURN; - ~JvmtiVMObjectAllocEventCollector() NOT_JVMTI_RETURN; - bool is_vm_object_alloc_event() { return true; } + JvmtiObjectAllocEventCollector() NOT_JVMTI_RETURN; + + void generate_call_for_allocated(); bool is_enabled() { return _enable; } void set_enabled(bool on) { _enable = on; } }; +// Used to record vm internally allocated object oops and post +// vm object alloc event for objects visible to java world. +// Constructor enables JvmtiThreadState flag and all vm allocated +// objects are recorded in a growable array. When destructor is +// called the vm object alloc event is posted for each object +// visible to java world. +// See jvm.cpp file for its usage. +// +class JvmtiVMObjectAllocEventCollector : public JvmtiObjectAllocEventCollector { + public: + JvmtiVMObjectAllocEventCollector() NOT_JVMTI_RETURN; + ~JvmtiVMObjectAllocEventCollector() NOT_JVMTI_RETURN; + virtual bool is_vm_object_alloc_event() { return true; } +}; +// Used to record sampled allocated object oops and post +// sampled object alloc event. +// Constructor enables JvmtiThreadState flag and all sampled allocated +// objects are recorded in a growable array. When destructor is +// called the sampled object alloc event is posted for each sampled object. +// See jvm.cpp file for its usage. +// +class JvmtiSampledObjectAllocEventCollector : public JvmtiObjectAllocEventCollector { + public: + JvmtiSampledObjectAllocEventCollector() NOT_JVMTI_RETURN; + ~JvmtiSampledObjectAllocEventCollector() NOT_JVMTI_RETURN; + bool is_sampled_object_alloc_event() { return true; } +}; // Marker class to disable the posting of VMObjectAlloc events // within its scope. 
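An aside on the collector classes just declared: they are meant to be used as scoped (RAII) objects, recording allocations while alive and posting them on destruction. A hedged, self-contained C++ sketch of that shape, with stand-in types rather than the real JVMTI classes:

#include <cstdio>
#include <vector>

// Stand-in for the JvmtiObjectAllocEventCollector pattern: record while in
// scope, post everything from the destructor (generate_call_for_allocated).
struct ScopedAllocEventCollector {
  std::vector<const void*> allocated;   // recorded allocations
  void (*post_callback)(const void*);   // callback used at scope exit

  explicit ScopedAllocEventCollector(void (*cb)(const void*))
      : post_callback(cb) {}

  void record_allocation(const void* obj) { allocated.push_back(obj); }

  ~ScopedAllocEventCollector() {
    for (const void* obj : allocated) post_callback(obj);
  }
};

static void post_sampled_alloc(const void* obj) {
  std::printf("sampled object alloc event for %p\n", obj);
}

int main() {
  ScopedAllocEventCollector collector(&post_sampled_alloc);
  int dummy = 0;
  collector.record_allocation(&dummy); // posted when collector goes away
}

This is why the opto runtime entry points earlier simply declare a JvmtiSampledObjectAllocEventCollector on the stack: leaving the function posts whatever was sampled inside it.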
--- old/src/hotspot/share/prims/jvmtiManageCapabilities.cpp 2018-03-08 15:53:29.459595613 -0800 +++ new/src/hotspot/share/prims/jvmtiManageCapabilities.cpp 2018-03-08 15:53:29.255596302 -0800 @@ -158,6 +158,7 @@ jc.can_generate_field_access_events = 1; jc.can_generate_breakpoint_events = 1; jc.can_sample_heap = 1; + jc.can_generate_sampled_object_alloc_events = 1; return jc; } @@ -426,6 +427,8 @@ log_trace(jvmti)("can_generate_breakpoint_events"); if (cap->can_sample_heap) log_trace(jvmti)("can_sample_heap"); + if (cap->can_generate_sampled_object_alloc_events) + log_trace(jvmti)("can_generate_sampled_object_alloc_events"); if (cap->can_suspend) log_trace(jvmti)("can_suspend"); if (cap->can_redefine_any_class ) --- old/src/hotspot/share/prims/jvmtiThreadState.cpp 2018-03-08 15:53:30.259592908 -0800 +++ new/src/hotspot/share/prims/jvmtiThreadState.cpp 2018-03-08 15:53:30.007593760 -0800 @@ -60,6 +60,7 @@ _head_env_thread_state = NULL; _dynamic_code_event_collector = NULL; _vm_object_alloc_event_collector = NULL; + _sampled_object_alloc_event_collector = NULL; _the_class_for_redefinition_verification = NULL; _scratch_class_for_redefinition_verification = NULL; _cur_stack_depth = UNKNOWN_STACK_DEPTH; --- old/src/hotspot/share/prims/jvmtiThreadState.hpp 2018-03-08 15:53:31.111590028 -0800 +++ new/src/hotspot/share/prims/jvmtiThreadState.hpp 2018-03-08 15:53:30.863590866 -0800 @@ -113,6 +113,8 @@ JvmtiDynamicCodeEventCollector* _dynamic_code_event_collector; // holds the current vm object alloc event collector, NULL if no event collector in use JvmtiVMObjectAllocEventCollector* _vm_object_alloc_event_collector; + // holds the current sampled object alloc event collector, NULL if no event collector in use + JvmtiSampledObjectAllocEventCollector* _sampled_object_alloc_event_collector; // Should only be created by factory methods JvmtiThreadState(JavaThread *thread); @@ -314,12 +316,18 @@ JvmtiVMObjectAllocEventCollector* get_vm_object_alloc_event_collector() { return _vm_object_alloc_event_collector; } + JvmtiSampledObjectAllocEventCollector* get_sampled_object_alloc_event_collector() { + return _sampled_object_alloc_event_collector; + } void set_dynamic_code_event_collector(JvmtiDynamicCodeEventCollector* collector) { _dynamic_code_event_collector = collector; } void set_vm_object_alloc_event_collector(JvmtiVMObjectAllocEventCollector* collector) { _vm_object_alloc_event_collector = collector; } + void set_sampled_object_alloc_event_collector(JvmtiSampledObjectAllocEventCollector* collector) { + _sampled_object_alloc_event_collector = collector; + } // --- old/src/hotspot/share/runtime/heapMonitoring.cpp 2018-03-08 15:53:31.947587202 -0800 +++ new/src/hotspot/share/runtime/heapMonitoring.cpp 2018-03-08 15:53:31.683588094 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * Copyright (c) 2018, Google and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -221,7 +221,8 @@ public: // The function that gets called to add a trace to the list of // traces we are maintaining. - void add_trace(jvmtiAllocTraceInfo* trace, oop o); + // Returns if the trace got added or not. + bool add_trace(jvmtiAllocTraceInfo* trace, oop o); // The function that gets called by the client to retrieve the list // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated. 
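To see how the capability and event bit registered above fit together from an agent's point of view, a hedged C++ sketch follows. It assumes only the names this patch introduces (can_generate_sampled_object_alloc_events, JVMTI_EVENT_SAMPLED_OBJECT_ALLOC) plus standard JVMTI calls:

#include <jvmti.h>
#include <cstring>

static jvmtiEnv* jvmti = nullptr;

// Minimal agent skeleton: request the new capability, then enable the new
// event. Without the capability, SetEventNotificationMode is expected to
// fail with JVMTI_ERROR_MUST_POSSESS_CAPABILITY.
extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char*, void*) {
  if (vm->GetEnv(reinterpret_cast<void**>(&jvmti), JVMTI_VERSION_1_2) != JNI_OK)
    return JNI_ERR;

  jvmtiCapabilities caps;
  std::memset(&caps, 0, sizeof(caps));
  caps.can_generate_sampled_object_alloc_events = 1; // added by this patch
  if (jvmti->AddCapabilities(&caps) != JVMTI_ERROR_NONE) return JNI_ERR;

  if (jvmti->SetEventNotificationMode(JVMTI_ENABLE,
                                      JVMTI_EVENT_SAMPLED_OBJECT_ALLOC,
                                      nullptr) != JVMTI_ERROR_NONE)
    return JNI_ERR;
  return JNI_OK;
}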
@@ -280,11 +281,6 @@
     _stats.sample_rate_count++;
   }
-  bool initialized() {
-    return OrderAccess::load_acquire(&_initialized) != 0;
-    return _initialized;
-  }
-
  private:
   // The traces currently sampled.
   GrowableArray* _allocated_traces;
@@ -305,7 +301,7 @@
   int _max_gc_storage;
   static StackTraceStorage* internal_storage;
-  int _initialized;
+  bool _initialized;
   // Support functions and classes for copying data to the external
   // world.
@@ -392,7 +388,7 @@
   _recent_garbage_traces = NULL;
   _frequent_garbage_traces = NULL;
   _max_gc_storage = 0;
-  OrderAccess::release_store(&_initialized, 0);
+  _initialized = false;
 }
 void StackTraceStorage::free_garbage() {
@@ -425,7 +421,7 @@
 }
 void StackTraceStorage::free_storage() {
-  if (!initialized()) {
+  if (!_initialized) {
     return;
   }
@@ -440,7 +436,6 @@
 }
 StackTraceStorage::~StackTraceStorage() {
-  MutexLocker mu(HeapMonitorStorage_lock);
   free_storage();
 }
@@ -450,7 +445,7 @@
          "This should not be accessed concurrently");
   // In case multiple threads got locked and then 1 by 1 got through.
-  if (initialized()) {
+  if (_initialized) {
     return;
   }
@@ -464,26 +459,37 @@
   _max_gc_storage = max_gc_storage;
   memset(&_stats, 0, sizeof(_stats));
-  OrderAccess::release_store(&_initialized, 1);
+  _initialized = true;
 }
-void StackTraceStorage::add_trace(jvmtiAllocTraceInfo* trace, oop o) {
+bool StackTraceStorage::add_trace(jvmtiAllocTraceInfo* trace, oop o) {
   MutexLocker mu(HeapMonitorStorage_lock);
-  // Last minute check on initialization here in case:
-  // Between the moment object_alloc_do_sample's check for initialization
-  // and now, there was a stop() that deleted the data.
-  if (initialized()) {
-    StackTraceDataWithOop new_data(trace, o);
-    _stats.sample_count++;
-    _stats.stack_depth_accumulation += trace->stack_info->frame_count;
-    _allocated_traces->append(new_data);
+  // Last minute check on initialization here in case the system got turned off
+  // and a few sample requests got through to here anyway, i.e.:
+  // A sample point was reached in the TLAB; that code checks for
+  // HeapMonitoring::enabled and calls object_alloc_do_sample.
+  // That code starts collecting a stacktrace and then calls this add_trace.
+  //
+  // At the same time, another thread has turned off HeapMonitoring while the
+  // stacktraces were getting constructed and disables StackTraceStorage.
+  //
+  // Both disabling and this add_trace are protected by the same
+  // HeapMonitorStorage_lock mutex.
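The comment above describes a check-under-lock pattern worth isolating; the patch's code continues right after this hedged illustration, which uses std::mutex as a stand-in for HeapMonitorStorage_lock and int as a stand-in for the trace type:

#include <mutex>
#include <vector>

// Sketch of the race handling described above: a sampler may reach add()
// after another thread has disabled storage, so the enabled flag must be
// re-checked under the same mutex that disable() holds.
class TraceStorageSketch {
  std::mutex lock_;          // stand-in for HeapMonitorStorage_lock
  bool initialized_ = false;
  std::vector<int> traces_;  // stand-in for the sampled traces

 public:
  void enable() {
    std::lock_guard<std::mutex> g(lock_);
    initialized_ = true;
  }
  void disable() {
    std::lock_guard<std::mutex> g(lock_);
    initialized_ = false;
    traces_.clear();
  }
  // Returns whether the trace was kept, mirroring the new bool return of
  // add_trace: on false the caller still owns (and must free) its buffers.
  bool add(int trace) {
    std::lock_guard<std::mutex> g(lock_);
    if (!initialized_) return false;
    traces_.push_back(trace);
    return true;
  }
};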
+ if (!_initialized) { + return false; } + + StackTraceDataWithOop new_data(trace, o); + _stats.sample_count++; + _stats.stack_depth_accumulation += trace->stack_info->frame_count; + _allocated_traces->append(new_data); + return true; } void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) { size_t count = 0; - if (initialized()) { + if (_initialized) { int len = _allocated_traces->length(); _traces_on_last_full_gc->clear(); @@ -849,59 +855,56 @@ void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, size_t byte_size) { JavaThread* thread = static_cast(t); - if (StackTraceStorage::storage()->initialized()) { - assert(t->is_Java_thread(), "non-Java thread passed to do_sample"); - JavaThread* thread = static_cast(t); + assert(t->is_Java_thread(), "non-Java thread passed to do_sample"); - jvmtiAllocTraceInfo* trace = NEW_C_HEAP_OBJ(jvmtiAllocTraceInfo, mtInternal); - if (trace == NULL) { - return; - } + jvmtiAllocTraceInfo* trace = NEW_C_HEAP_OBJ(jvmtiAllocTraceInfo, mtInternal); + if (trace == NULL) { + return; + } - jvmtiStackInfo* stack_info = NEW_C_HEAP_OBJ(jvmtiStackInfo, mtInternal); - if (trace == NULL) { - FREE_C_HEAP_OBJ(trace); - return; - } - trace->stack_info = stack_info; + jvmtiStackInfo* stack_info = NEW_C_HEAP_OBJ(jvmtiStackInfo, mtInternal); + if (trace == NULL) { + FREE_C_HEAP_OBJ(trace); + return; + } + trace->stack_info = stack_info; - jvmtiFrameInfo* frames = - NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal); + jvmtiFrameInfo* frames = + NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal); - if (frames == NULL) { - FREE_C_HEAP_OBJ(stack_info); - FREE_C_HEAP_OBJ(trace); - return; - } - stack_info->frame_buffer = frames; - stack_info->frame_count = 0; + if (frames == NULL) { + FREE_C_HEAP_OBJ(stack_info); + FREE_C_HEAP_OBJ(trace); + return; + } + stack_info->frame_buffer = frames; + stack_info->frame_count = 0; - trace->thread_id = SharedRuntime::get_java_tid(thread); - trace->size = byte_size; + trace->thread_id = SharedRuntime::get_java_tid(thread); + trace->size = byte_size; - if (thread->has_last_Java_frame()) { // just to be safe - vframeStream vfst(thread, true); - int count = 0; - while (!vfst.at_end() && count < MaxStackDepth) { - Method* m = vfst.method(); - frames[count].location = vfst.bci(); - frames[count].method = m->jmethod_id(); - count++; + if (thread->has_last_Java_frame()) { // just to be safe + vframeStream vfst(thread, true); + int count = 0; + while (!vfst.at_end() && count < MaxStackDepth) { + Method* m = vfst.method(); + frames[count].location = vfst.bci(); + frames[count].method = m->jmethod_id(); + count++; - vfst.next(); - } - stack_info->frame_count = count; + vfst.next(); } + stack_info->frame_count = count; + } - if (stack_info->frame_count > 0) { - // Success! - StackTraceStorage::storage()->add_trace(trace, o); + if (stack_info->frame_count > 0) { + if (StackTraceStorage::storage()->add_trace(trace, o)) { return; } - - // Failure! - FREE_C_HEAP_ARRAY(jvmtiFrameInfo, frames); - FREE_C_HEAP_OBJ(stack_info); - FREE_C_HEAP_OBJ(trace); } + + // Failure! + FREE_C_HEAP_ARRAY(jvmtiFrameInfo, frames); + FREE_C_HEAP_OBJ(stack_info); + FREE_C_HEAP_OBJ(trace); } --- old/src/hotspot/share/runtime/heapMonitoring.hpp 2018-03-08 15:53:32.819584256 -0800 +++ new/src/hotspot/share/runtime/heapMonitoring.hpp 2018-03-08 15:53:32.559585134 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * Copyright (c) 2018, Google and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it --- old/src/hotspot/share/runtime/thread.cpp 2018-03-08 15:53:33.567581727 -0800 +++ new/src/hotspot/share/runtime/thread.cpp 2018-03-08 15:53:33.315582579 -0800 @@ -233,6 +233,7 @@ set_active_handles(NULL); set_free_handle_block(NULL); set_last_handle_mark(NULL); + _heap_sampler.set_thread(this); // This initial value ==> never claimed. _oops_do_parity = 0; @@ -261,7 +262,6 @@ omFreeProvision = 32; omInUseList = NULL; omInUseCount = 0; - _bytes_until_sample = 0; #ifdef ASSERT _visited_for_critical_count = false; @@ -5011,40 +5011,6 @@ } } -void Thread::pick_next_sample(size_t overflowed_bytes) { - HeapMonitoring::pick_next_sample(&_bytes_until_sample); - - // Try to correct sample size by removing extra space from last allocation. - if (overflowed_bytes > 0 && _bytes_until_sample > overflowed_bytes) { - _bytes_until_sample -= overflowed_bytes; - } -} - -void Thread::check_for_sampling(HeapWord* ptr, size_t allocation_size, size_t bytes_since_allocation) { - oopDesc* oop = reinterpret_cast(ptr); - size_t total_allocated_bytes = bytes_since_allocation + allocation_size; - - // If not yet time for a sample, skip it. - if (total_allocated_bytes < _bytes_until_sample) { - _bytes_until_sample -= total_allocated_bytes; - return; - } - - HeapMonitoring::object_alloc_do_sample(this, oop, allocation_size); - - size_t overflow_bytes = total_allocated_bytes - _bytes_until_sample; - pick_next_sample(overflow_bytes); -} - -size_t Thread::bytes_until_sample() { - if (!_bytes_until_sample) { - pick_next_sample(); - } - - assert(_bytes_until_sample != 0, "Sampling size should never be 0"); - return _bytes_until_sample; -} - void Threads::verify() { ALL_JAVA_THREADS(p) { p->verify(); --- old/src/hotspot/share/runtime/thread.hpp 2018-03-08 15:53:34.475578659 -0800 +++ new/src/hotspot/share/runtime/thread.hpp 2018-03-08 15:53:34.227579497 -0800 @@ -40,6 +40,7 @@ #include "runtime/park.hpp" #include "runtime/safepoint.hpp" #include "runtime/stubRoutines.hpp" +#include "runtime/threadHeapSampler.hpp" #include "runtime/threadLocalStorage.hpp" #include "runtime/thread_ext.hpp" #include "runtime/unhandledOops.hpp" @@ -322,8 +323,8 @@ ThreadLocalAllocBuffer _tlab; // Thread-local eden jlong _allocated_bytes; // Cumulative number of bytes allocated on - size_t _bytes_until_sample; // bytes until sample. // the Java heap + ThreadHeapSampler _heap_sampler; // For use when sampling the memory. mutable TRACE_DATA _trace_data; // Thread-local data for tracing @@ -439,8 +440,6 @@ inline void set_trace_flag(); inline void clear_trace_flag(); - void pick_next_sample(size_t diff = 0); - // Support for Unhandled Oop detection // Add the field for both, fastdebug and debug, builds to keep // Thread's fields layout the same. 
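An aside on the sampler that now lives outside Thread: the byte-budget arithmetic of ThreadHeapSampler::check_for_sampling (see the new threadHeapSampler.cpp later in this patch) can be exercised stand-alone. In this hedged sketch the randomized next-interval pick is stubbed to a constant 512 bytes:

#include <cstdio>
#include <cstddef>

// Stand-alone model of the bytes-until-sample bookkeeping: consume budget
// until an allocation crosses the sample point, then restart the budget,
// deducting the overshoot from the next interval.
class SamplerSketch {
  std::size_t bytes_until_sample_ = 512; // stub for a randomized pick

 public:
  bool check_for_sampling(std::size_t allocation_size,
                          std::size_t bytes_since_last) {
    std::size_t total = bytes_since_last + allocation_size;
    if (total < bytes_until_sample_) {
      bytes_until_sample_ -= total;      // not time yet: just consume budget
      return false;
    }
    std::size_t overflow = total - bytes_until_sample_;
    bytes_until_sample_ = 512;           // pick_next_sample()
    if (overflow > 0 && bytes_until_sample_ > overflow)
      bytes_until_sample_ -= overflow;   // correct for the overshoot
    return true;                         // this allocation gets sampled
  }
};

int main() {
  SamplerSketch s;
  for (int i = 0; i < 5; i++)
    std::printf("alloc 200 -> sampled=%d\n", (int)s.check_for_sampling(200, 0));
}

With five 200-byte allocations this prints sampled=0,0,1,0,0: the third allocation crosses the 512-byte point and its 88-byte overshoot is deducted from the next interval.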
@@ -507,9 +506,7 @@ void incr_allocated_bytes(jlong size) { _allocated_bytes += size; } inline jlong cooked_allocated_bytes(); - size_t bytes_until_sample(); - void set_bytes_until_sample(size_t bytes) { _bytes_until_sample = bytes; } - void check_for_sampling(HeapWord* obj, size_t size_in_bytes, size_t bytes_allocated_before = 0); + ThreadHeapSampler& heap_sampler() { return _heap_sampler; } TRACE_DEFINE_THREAD_TRACE_DATA_OFFSET; TRACE_DATA* trace_data() const { return &_trace_data; } --- old/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java 2018-03-08 15:53:35.259576009 -0800 +++ new/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java 2018-03-08 15:53:35.019576821 -0800 @@ -147,4 +147,8 @@ public static void freeStorage() { arrays = null; } + + public native static boolean obtainedEvents(Frame[] frames); + public native static boolean eventStorageIsEmpty(); + public native static void resetEventStorage(); } --- old/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorCachedTest.java 2018-03-08 15:53:36.087573212 -0800 +++ new/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorCachedTest.java 2018-03-08 15:53:35.851574009 -0800 @@ -36,7 +36,7 @@ public class HeapMonitorCachedTest { private static native boolean cachedAndLiveAreSame(); - private static native void getLiveTracesToForceGc(); + private static native boolean forceGC(); private static native long getCachedHashCode(); public static void main(String[] args) { @@ -52,7 +52,8 @@ } // Check cached & live are the same after a GC. - getLiveTracesToForceGc(); + System.gc(); + status = cachedAndLiveAreSame(); if (!status) { throw new RuntimeException("Cached frames and live frames are not the same."); --- old/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c 2018-03-08 15:53:36.959570266 -0800 +++ new/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c 2018-03-08 15:53:36.703571131 -0800 @@ -21,6 +21,8 @@ * questions. */ +#include +#include #include #include #include @@ -49,6 +51,108 @@ static const char *EXC_CNAME = "java/lang/Exception"; static jvmtiEnv *jvmti = NULL; +static pthread_mutex_t event_data_lock; + +// Event storage code. 
+
+typedef struct _LiveObjectTrace {
+  jvmtiFrameInfo* frames;
+  size_t frame_count;
+} LiveObjectTrace;
+
+typedef struct _EventStorage {
+  int live_object_size;
+  int live_object_count;
+  LiveObjectTrace** live_objects;
+} EventStorage;
+
+typedef struct _ExpectedContentFrame {
+  const char *name;
+  const char *signature;
+  const char *file_name;
+  int line_number;
+} ExpectedContentFrame;
+
+static jboolean check_live_object_trace_content(
+    JNIEnv *env, LiveObjectTrace* trace, ExpectedContentFrame *expected,
+    size_t expected_count, int print_out_comparisons);
+
+static EventStorage global_event_storage;
+
+static int event_storage_get_count(EventStorage* storage) {
+  return storage->live_object_count;
+}
+
+static jboolean event_storage_contains(JNIEnv* env,
+                                       EventStorage* storage,
+                                       ExpectedContentFrame* frames,
+                                       size_t size) {
+  int i;
+  fprintf(stderr, "Event storage contains: %d\n", storage->live_object_count);
+  for (i = 0; i < storage->live_object_count; i++) {
+    LiveObjectTrace* trace = storage->live_objects[i];
+
+    if (check_live_object_trace_content(env, trace, frames, size, PRINT_OUT)) {
+      return TRUE;
+    }
+  }
+  return FALSE;
+}
+
+static void event_storage_augment_storage(EventStorage* storage) {
+  int new_max = (storage->live_object_size * 2) + 1;
+  LiveObjectTrace** new_objects = malloc(new_max * sizeof(*new_objects));
+
+  int current_count = storage->live_object_count;
+  memcpy(new_objects, storage->live_objects, current_count * sizeof(*new_objects));
+  free(storage->live_objects);
+  storage->live_objects = new_objects;
+
+  storage->live_object_size = new_max;
+}
+
+static void event_storage_add(EventStorage* storage,
+                              jthread thread,
+                              jobject object,
+                              jclass klass,
+                              jlong size) {
+  pthread_mutex_lock(&event_data_lock);
+  jvmtiFrameInfo frames[64];
+  jint count;
+  jvmtiError err;
+  err = (*jvmti)->GetStackTrace(jvmti, thread, 0, 64, frames, &count);
+  if (err == JVMTI_ERROR_NONE && count >= 1) {
+    if (storage->live_object_count >= storage->live_object_size) {
+      event_storage_augment_storage(storage);
+    }
+    assert(storage->live_object_count < storage->live_object_size);
+
+    jvmtiFrameInfo* allocated_frames = malloc(count * sizeof(*allocated_frames));
+    memcpy(allocated_frames, frames, count * sizeof(*allocated_frames));
+
+    LiveObjectTrace* live_object = malloc(sizeof(*live_object));
+    live_object->frames = allocated_frames;
+    live_object->frame_count = count;
+    storage->live_objects[storage->live_object_count] = live_object;
+    storage->live_object_count++;
+  }
+  pthread_mutex_unlock(&event_data_lock);
+}
+
+static void event_storage_reset(EventStorage* storage) {
+  pthread_mutex_lock(&event_data_lock);
+  int max = storage->live_object_count;
+  int i;
+  for (i = 0; i < max; i++) {
+    LiveObjectTrace* object = storage->live_objects[i];
+    free(object->frames);
+    free(object);
+  }
+  free(storage->live_objects);
+  memset(storage, 0, sizeof(*storage));
+  pthread_mutex_unlock(&event_data_lock);
+}
+
+// General JVMTI agent code.
 static int check_error(jvmtiError err, const char *s) {
   if (err != JVMTI_ERROR_NONE) {
@@ -97,6 +201,17 @@
   return JNI_VERSION_1_8;
 }
+JNIEXPORT
+void JNICALL SampledObjectAlloc(jvmtiEnv *jvmti_env,
+                                JNIEnv* jni_env,
+                                jthread thread,
+                                jobject object,
+                                jclass object_klass,
+                                jlong size) {
+  // Not optimal to do this at the callback but makes testing easier for now.
+  event_storage_add(&global_event_storage, thread, object, object_klass, size);
+}
+
 static jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved) {
   jint res;
@@ -110,22 +225,31 @@
   jvmtiEventCallbacks callbacks;
   memset(&callbacks, 0, sizeof(callbacks));
+  callbacks.SampledObjectAlloc = &SampledObjectAlloc;
   jvmtiCapabilities caps;
   memset(&caps, 0, sizeof(caps));
   // Get line numbers, sample heap, and filename for the test.
   caps.can_get_line_numbers = 1;
   caps.can_sample_heap = 1;
+  caps.can_generate_sampled_object_alloc_events = 1;
   caps.can_get_source_file_name = 1;
   if (check_error((*jvmti)->AddCapabilities(jvmti, &caps),
                   "Add capabilities\n")) {
     return JNI_ERR;
   }
+
   if (check_error((*jvmti)->SetEventCallbacks(jvmti, &callbacks,
                                               sizeof(jvmtiEventCallbacks)),
                   "Set Event Callbacks")) {
     return JNI_ERR;
   }
+
+  if (pthread_mutex_init(&event_data_lock, NULL) != 0) {
+    return JNI_ERR;
+  }
+
   return JNI_OK;
 }
@@ -170,28 +294,12 @@
   }
 }
-typedef struct _ExpectedContentFrame {
-  const char *name;
-  const char *signature;
-  const char *file_name;
-  int line_number;
-} ExpectedContentFrame;
-
-static jboolean check_sample_content(JNIEnv *env,
-                                     jvmtiAllocTraceInfo* trace,
-                                     ExpectedContentFrame *expected,
-                                     int expected_count,
-                                     int print_out_comparisons) {
+static jboolean check_frame_content(JNIEnv *env,
+                                    jvmtiFrameInfo* frames,
+                                    ExpectedContentFrame *expected,
+                                    int expected_count,
+                                    int print_out_comparisons) {
   int i;
-
-  jvmtiStackInfo* stack_info = trace->stack_info;
-
-  if (expected_count > stack_info->frame_count) {
-    return FALSE;
-  }
-
-  jvmtiFrameInfo* frames = stack_info->frame_buffer;
-
   for (i = 0; i < expected_count; i++) {
     // Get basic information out of the trace.
     int bci = frames[i].location;
@@ -255,6 +363,33 @@
   return TRUE;
 }
+static jboolean check_live_object_trace_content(
+    JNIEnv *env, LiveObjectTrace* trace, ExpectedContentFrame *expected,
+    size_t expected_count, int print_out_comparisons) {
+
+  if (expected_count > trace->frame_count) {
+    return FALSE;
+  }
+
+  return check_frame_content(env, trace->frames,
+                             expected, expected_count, print_out_comparisons);
+}
+
+static jboolean check_sample_content(JNIEnv *env,
+                                     jvmtiAllocTraceInfo* trace,
+                                     ExpectedContentFrame *expected,
+                                     int expected_count,
+                                     int print_out_comparisons) {
+  jvmtiStackInfo* stack_info = trace->stack_info;
+
+  if (expected_count > stack_info->frame_count) {
+    return FALSE;
+  }
+
+  return check_frame_content(env, stack_info->frame_buffer,
+                             expected, expected_count, print_out_comparisons);
+}
+
 static jboolean compare_samples(JNIEnv* env, jvmtiAllocTraceInfo* traces,
                                 int trace_count,
                                 ExpectedContentFrame* expected_content,
@@ -473,11 +608,24 @@
                                                 int max_traces) {
   check_error((*jvmti)->StartHeapSampling(jvmti, rate, max_traces),
               "Start Heap Sampling");
+  check_error(
+      (*jvmti)->SetEventNotificationMode(jvmti,
+                                         JVMTI_ENABLE,
+                                         JVMTI_EVENT_SAMPLED_OBJECT_ALLOC,
+                                         NULL),
+      "Start sampling events");
 }
 JNIEXPORT void JNICALL
 Java_MyPackage_HeapMonitor_disableSampling(JNIEnv *env, jclass cls) {
   check_error((*jvmti)->StopHeapSampling(jvmti), "Stop Heap Sampling");
+
+  check_error(
+      (*jvmti)->SetEventNotificationMode(jvmti,
+                                         JVMTI_DISABLE,
+                                         JVMTI_EVENT_SAMPLED_OBJECT_ALLOC,
+                                         NULL),
+      "Stop sampling events");
 }
 JNIEXPORT jboolean JNICALL
@@ -762,6 +910,8 @@
                                  int other_trace_count,
                                  int print_out_comparisons) {
   if (trace_count != other_trace_count) {
+    fprintf(stderr, "Trace count not the same: %d %d!\n",
+            trace_count, other_trace_count);
     return FALSE;
   }
@@ -774,20 +924,24 @@
     jvmtiStackInfo* other_stack_info = trace->stack_info;
     if (stack_info->frame_count != other_stack_info->frame_count) {
+      fprintf(stderr, "Frame count not the same!\n");
       return FALSE;
     }
     if (trace->size != other_trace->size) {
+      fprintf(stderr, "Size not the same!\n");
       return FALSE;
     }
     if (trace->thread_id != other_trace->thread_id) {
+      fprintf(stderr, "Thread id not the same!\n");
       return FALSE;
     }
     jvmtiFrameInfo* frames = stack_info->frame_buffer;
     jvmtiFrameInfo* other_frames = other_stack_info->frame_buffer;
     if (memcmp(frames, other_frames, sizeof(*frames) * stack_info->frame_count)) {
+      fprintf(stderr, "memcmp not the same!\n");
       return FALSE;
     }
   }
@@ -831,6 +985,16 @@
   return result;
 }
+JNIEXPORT jboolean JNICALL
+Java_MyPackage_HeapMonitorCachedTest_forceGC(JNIEnv *env, jclass cls) {
+  jvmtiError error = (*jvmti)->ForceGarbageCollection(jvmti);
+
+  if (error != JVMTI_ERROR_NONE) {
+    return FALSE;
+  }
+  return TRUE;
+}
+
 static long hash(long hash_code, long value) {
   return hash_code * 31 + value;
 }
@@ -891,6 +1055,43 @@
   return !check_and(env, frames, FALSE, FALSE, TRUE, PRINT_OUT);
 }
+JNIEXPORT jboolean JNICALL
+Java_MyPackage_HeapMonitorEventNoCapabilityTest_eventSamplingFail(JNIEnv *env,
+                                                                  jclass cls) {
+  jvmtiCapabilities caps;
+  memset(&caps, 0, sizeof(caps));
+  caps.can_generate_sampled_object_alloc_events = 1;
+  if (check_error((*jvmti)->RelinquishCapabilities(jvmti, &caps),
+                  "Relinquish capabilities\n")) {
+    return FALSE;
+  }
+
+  if (check_capability_error(
+      (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE,
+                                         JVMTI_EVENT_SAMPLED_OBJECT_ALLOC, NULL),
+      "Set Sampled Object Alloc Event Notification")) {
+    return FALSE;
+  }
+  return TRUE;
+}
+
+JNIEXPORT jboolean JNICALL
+Java_MyPackage_HeapMonitor_eventStorageIsEmpty(JNIEnv* env, jclass cls) {
+  return event_storage_get_count(&global_event_storage) == 0;
+}
+
+JNIEXPORT jboolean JNICALL
+Java_MyPackage_HeapMonitor_obtainedEvents(JNIEnv* env, jclass cls, jobjectArray frames) {
+  jsize size = (*env)->GetArrayLength(env, frames);
+  ExpectedContentFrame native_frames[size];
+  fill_native_frames(env, frames, native_frames, size);
+  return event_storage_contains(env, &global_event_storage, native_frames, size);
+}
+
+JNIEXPORT void JNICALL
+Java_MyPackage_HeapMonitor_resetEventStorage(JNIEnv* env, jclass cls) {
+  event_storage_reset(&global_event_storage);
+}
+
 #ifdef __cplusplus
 }
 #endif
--- /dev/null	2018-03-08 15:11:46.002676589 -0800
+++ new/src/hotspot/share/runtime/threadHeapSampler.cpp	2018-03-08 15:53:37.515568388 -0800
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018, Google and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "runtime/sharedRuntime.hpp" +#include "runtime/threadHeapSampler.hpp" +#include "runtime/heapMonitoring.hpp" + +void ThreadHeapSampler::pick_next_sample(size_t overflowed_bytes) { + HeapMonitoring::pick_next_sample(&_bytes_until_sample); + + // Try to correct sample size by removing extra space from last allocation. + if (overflowed_bytes > 0 && _bytes_until_sample > overflowed_bytes) { + _bytes_until_sample -= overflowed_bytes; + } +} + +void ThreadHeapSampler::check_for_sampling(HeapWord* ptr, size_t allocation_size, size_t bytes_since_allocation) { + oopDesc* oop = reinterpret_cast(ptr); + size_t total_allocated_bytes = bytes_since_allocation + allocation_size; + + // If not yet time for a sample, skip it. + if (total_allocated_bytes < _bytes_until_sample) { + _bytes_until_sample -= total_allocated_bytes; + return; + } + + HeapMonitoring::object_alloc_do_sample(_thread, oop, allocation_size); + JvmtiExport::sampled_object_alloc_event_collector(oop); + + size_t overflow_bytes = total_allocated_bytes - _bytes_until_sample; + pick_next_sample(overflow_bytes); +} --- /dev/null 2018-03-08 15:11:46.002676589 -0800 +++ new/src/hotspot/share/runtime/threadHeapSampler.hpp 2018-03-08 15:53:38.371565497 -0800 @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2018, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef RUNTIME_THREADHEAPSAMPLER_HPP +#define RUNTIME_THREADHEAPSAMPLER_HPP + +#include "memory/allocation.hpp" + +class ThreadHeapSampler { + private: + size_t _bytes_until_sample; + Thread* _thread; + + void pick_next_sample(size_t diff = 0); + + public: + ThreadHeapSampler() : _bytes_until_sample(0), _thread(NULL) { + } + + void set_thread(Thread* t) { _thread = t; } + + size_t bytes_until_sample() { return _bytes_until_sample; } + void set_bytes_until_sample(size_t bytes) { _bytes_until_sample = bytes; } + + void check_for_sampling(HeapWord* obj, size_t size_in_bytes, size_t bytes_allocated_before = 0); +}; + +#endif // SHARE_RUNTIME_THREADHEAPSAMPLER_HPP --- /dev/null 2018-03-08 15:11:46.002676589 -0800 +++ new/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorEventNoCapabilityTest.java 2018-03-08 15:53:39.243562551 -0800 @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2018, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor does not work without the required capability. + * @compile HeapMonitorEventNoCapabilityTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorEventNoCapabilityTest + */ + +public class HeapMonitorEventNoCapabilityTest { + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + private native static int eventSamplingFail(); + + public static void main(String[] args) { + int result = eventSamplingFail(); + + if (result == 0) { + throw new RuntimeException("Some methods could be called without a capability."); + } + } +} --- /dev/null 2018-03-08 15:11:46.002676589 -0800 +++ new/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorEventTest.java 2018-03-08 15:53:40.255559134 -0800 @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2018, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package MyPackage; + +import java.util.List; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor API + * @build Frame HeapMonitor + * @compile HeapMonitorEventTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorEventTest + */ + +public class HeapMonitorEventTest { + + private static native boolean framesAreNotLive(Frame[] frames); + + public static void main(String[] args) { + if (!HeapMonitor.eventStorageIsEmpty()) { + throw new RuntimeException("Storage is not empty at test start..."); + } + + HeapMonitor.enableSampling(); + List frameList = HeapMonitor.allocate(); + frameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorEventTest.java", 46)); + + Frame[] frames = frameList.toArray(new Frame[0]); + if (!HeapMonitor.obtainedEvents(frames)) { + throw new RuntimeException("Events not found with the right frames."); + } + + HeapMonitor.disableSampling(); + HeapMonitor.resetEventStorage(); + if (!HeapMonitor.eventStorageIsEmpty()) { + throw new RuntimeException("Storage is not empty after reset."); + } + + HeapMonitor.allocate(); + if (!HeapMonitor.eventStorageIsEmpty()) { + throw new RuntimeException("Storage is not empty after allocation while disabled."); + } + } +}
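Finally, a hedged illustration of what a SampledObjectAlloc handler can look like on the agent side, matching the event signature specified in jvmti.xml above. GetClassSignature and Deallocate are standard JVMTI; everything else here is hypothetical:

#include <jvmti.h>
#include <cstdio>

// Handler with the signature this patch specifies for SampledObjectAlloc:
// env, thread, the sampled object, its class, and the allocation size.
extern "C" void JNICALL SampledObjectAlloc(jvmtiEnv* jvmti, JNIEnv* jni,
                                           jthread thread, jobject object,
                                           jclass object_klass, jlong size) {
  char* sig = NULL;
  if (jvmti->GetClassSignature(object_klass, &sig, NULL) == JVMTI_ERROR_NONE) {
    std::printf("sampled %lld bytes of %s\n",
                static_cast<long long>(size), sig);
    jvmti->Deallocate(reinterpret_cast<unsigned char*>(sig));
  }
}

Such a handler would be registered via callbacks.SampledObjectAlloc and SetEventCallbacks, as the test agent above does.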