--- old/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp 2018-02-12 20:04:54.491807156 -0800 +++ new/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp 2018-02-12 20:04:54.239808126 -0800 @@ -4076,7 +4076,7 @@ } else { lea(end, Address(obj, var_size_in_bytes)); } - ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset())); + ldr(rscratch1, Address(rthread, JavaThread::tlab_current_end_offset())); cmp(end, rscratch1); br(Assembler::HI, slow_case); @@ -4106,7 +4106,7 @@ } ldr(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); - ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); + ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_current_end_offset()))); // calculate amount of free space sub(t1, t1, top); @@ -4200,7 +4200,7 @@ str(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); add(top, top, t1); sub(top, top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes()); - str(top, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); + str(top, Address(rthread, in_bytes(JavaThread::tlab_current_end_offset()))); if (ZeroTLAB) { // This is a fast TLAB refill, therefore the GC is not notified of it. @@ -4347,7 +4347,7 @@ should_not_reach_here(); bind(next); - ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); + ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_current_end_offset()))); ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); cmp(rscratch2, rscratch1); br(Assembler::HS, ok); --- old/src/hotspot/cpu/arm/macroAssembler_arm.cpp 2018-02-12 20:04:55.415803597 -0800 +++ new/src/hotspot/cpu/arm/macroAssembler_arm.cpp 2018-02-12 20:04:55.167804552 -0800 @@ -1309,7 +1309,7 @@ assert_different_registers(obj, obj_end, tlab_end); ldr(obj, Address(Rthread, JavaThread::tlab_top_offset())); - ldr(tlab_end, Address(Rthread, JavaThread::tlab_end_offset())); + ldr(tlab_end, Address(Rthread, JavaThread::tlab_current_end_offset())); add_rc(obj_end, obj, size_expression); cmp(obj_end, tlab_end); b(slow_case, hi); @@ -1327,7 +1327,7 @@ InlinedAddress intArrayKlass_addr((address)Universe::intArrayKlassObj_addr()); Label discard_tlab, do_refill; ldr(top, Address(Rthread, JavaThread::tlab_top_offset())); - ldr(tmp1, Address(Rthread, JavaThread::tlab_end_offset())); + ldr(tmp1, Address(Rthread, JavaThread::tlab_current_end_offset())); ldr(tmp2, Address(Rthread, JavaThread::tlab_refill_waste_limit_offset())); // Calculate amount of free space @@ -1397,7 +1397,7 @@ #endif sub(tmp1, tmp1, ThreadLocalAllocBuffer::alignment_reserve_in_bytes()); - str(tmp1, Address(Rthread, JavaThread::tlab_end_offset())); + str(tmp1, Address(Rthread, JavaThread::tlab_current_end_offset())); if (ZeroTLAB) { // clobbers start and tmp --- old/src/hotspot/cpu/arm/templateTable_arm.cpp 2018-02-12 20:04:56.303800177 -0800 +++ new/src/hotspot/cpu/arm/templateTable_arm.cpp 2018-02-12 20:04:56.043801178 -0800 @@ -4408,7 +4408,7 @@ assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end); __ ldr(Robj, Address(Rthread, JavaThread::tlab_top_offset())); - __ ldr(Rtlab_end, Address(Rthread, in_bytes(JavaThread::tlab_end_offset()))); + __ ldr(Rtlab_end, Address(Rthread, in_bytes(JavaThread::tlab_current_end_offset()))); __ add(Rtlab_top, Robj, Rsize); __ cmp(Rtlab_top, Rtlab_end); __ b(slow_case, hi); --- old/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp 2018-02-12 20:04:57.179796803 -0800 +++ new/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp 2018-02-12 20:04:56.911797835 -0800 @@ -2309,7 +2309,7 @@ //verify_tlab(); not 
implemented ld(obj, in_bytes(JavaThread::tlab_top_offset()), R16_thread); - ld(R0, in_bytes(JavaThread::tlab_end_offset()), R16_thread); + ld(R0, in_bytes(JavaThread::tlab_current_end_offset()), R16_thread); if (var_size_in_bytes == noreg) { addi(new_top, obj, con_size_in_bytes); } else { --- old/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp 2018-02-12 20:04:58.067793383 -0800 +++ new/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp 2018-02-12 20:04:57.815794353 -0800 @@ -3692,7 +3692,7 @@ // Check if we can allocate in the TLAB. __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread); - __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread); + __ ld(RendValue, in_bytes(JavaThread::tlab_current_end_offset()), R16_thread); __ add(RnewTopValue, Rinstance_size, RoldTopValue); --- old/src/hotspot/cpu/s390/macroAssembler_s390.cpp 2018-02-12 20:04:58.999789794 -0800 +++ new/src/hotspot/cpu/s390/macroAssembler_s390.cpp 2018-02-12 20:04:58.731790825 -0800 @@ -2783,7 +2783,7 @@ } else { z_lay(end, Address(obj, var_size_in_bytes)); } - z_cg(end, Address(thread, JavaThread::tlab_end_offset())); + z_cg(end, Address(thread, JavaThread::tlab_current_end_offset())); branch_optimized(bcondHigh, slow_case); // Update the tlab top pointer. --- old/src/hotspot/cpu/s390/templateTable_s390.cpp 2018-02-12 20:04:59.775786805 -0800 +++ new/src/hotspot/cpu/s390/templateTable_s390.cpp 2018-02-12 20:04:59.551787668 -0800 @@ -3768,7 +3768,7 @@ Register RnewTopValue = tmp; __ z_lg(RoldTopValue, Address(Z_thread, JavaThread::tlab_top_offset())); __ load_address(RnewTopValue, Address(RoldTopValue, Rsize)); - __ z_cg(RnewTopValue, Address(Z_thread, JavaThread::tlab_end_offset())); + __ z_cg(RnewTopValue, Address(Z_thread, JavaThread::tlab_current_end_offset())); __ z_brh(slow_case); __ z_stg(RnewTopValue, Address(Z_thread, JavaThread::tlab_top_offset())); --- old/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp 2018-02-12 20:05:00.619783554 -0800 +++ new/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp 2018-02-12 20:05:00.363784541 -0800 @@ -3074,7 +3074,7 @@ bind(next); ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); - ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); + ld_ptr(G2_thread, in_bytes(JavaThread::tlab_current_end_offset()), t2); or3(t3, t2, t3); cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); STOP("assert(top <= end)"); @@ -3196,7 +3196,7 @@ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); // calculate amount of free space - ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); + ld_ptr(G2_thread, in_bytes(JavaThread::tlab_current_end_offset()), free); sub(free, obj, free); Label done; @@ -3248,7 +3248,7 @@ } ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top); - ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1); + ld_ptr(G2_thread, in_bytes(JavaThread::tlab_current_end_offset()), t1); ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2); // calculate amount of free space @@ -3340,7 +3340,7 @@ #endif // ASSERT add(top, t1, top); // t1 is tlab_size sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top); - st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset())); + st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_current_end_offset())); if (ZeroTLAB) { // This is a fast TLAB refill, therefore the GC is not notified of it. 
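Reviewer note: every platform stub touched above encodes the same bump-pointer fast path; the only functional change in these hunks is that the limit compared against is now JavaThread::tlab_current_end_offset(), which after this patch may point at a sampling end rather than the true TLAB end. A minimal C sketch of that logic, using a hypothetical TlabView struct purely for illustration (not the real JavaThread/TLAB layout):

#include <stddef.h>

typedef char* HeapWord;                 /* illustration only, not the VM type */

typedef struct {                        /* hypothetical stand-in for the thread-local TLAB fields */
  HeapWord top;                         /* JavaThread::tlab_top_offset() */
  HeapWord current_end;                 /* JavaThread::tlab_current_end_offset(), previously tlab_end_offset() */
} TlabView;

/* Returns the old top on success, NULL where the stub would branch to slow_case. */
static HeapWord tlab_fast_allocate(TlabView* tlab, size_t size_in_bytes) {
  HeapWord obj = tlab->top;
  HeapWord end = obj + size_in_bytes;
  if (end > tlab->current_end) {        /* cmp end, current_end; branch HI to slow_case */
    return NULL;
  }
  tlab->top = end;                      /* store the new top back into the thread */
  return obj;
}
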
--- old/src/hotspot/cpu/sparc/templateTable_sparc.cpp 2018-02-12 20:05:01.547779980 -0800 +++ new/src/hotspot/cpu/sparc/templateTable_sparc.cpp 2018-02-12 20:05:01.243781151 -0800 @@ -3296,7 +3296,7 @@ // check if we can allocate in the TLAB __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RalocatedObject - __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue); + __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_current_end_offset()), RendValue); __ add(RoldTopValue, Roffset, RnewTopValue); // if there is enough space, we do not CAS and do not clear --- old/src/hotspot/cpu/x86/macroAssembler_x86.cpp 2018-02-12 20:05:02.419776622 -0800 +++ new/src/hotspot/cpu/x86/macroAssembler_x86.cpp 2018-02-12 20:05:02.175777562 -0800 @@ -5585,7 +5585,7 @@ } else { lea(end, Address(obj, var_size_in_bytes, Address::times_1)); } - cmpptr(end, Address(thread, JavaThread::tlab_end_offset())); + cmpptr(end, Address(thread, JavaThread::tlab_current_end_offset())); jcc(Assembler::above, slow_case); // update the tlab top pointer @@ -5617,7 +5617,7 @@ NOT_LP64(get_thread(thread_reg)); movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); - movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); + movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_current_end_offset()))); // calculate amount of free space subptr(t1, top); @@ -5698,7 +5698,7 @@ movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top); addptr(top, t1); subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes()); - movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top); + movptr(Address(thread_reg, in_bytes(JavaThread::tlab_current_end_offset())), top); if (ZeroTLAB) { // This is a fast TLAB refill, therefore the GC is not notified of it. @@ -6259,7 +6259,7 @@ should_not_reach_here(); bind(next); - movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); + movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_current_end_offset()))); cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); jcc(Assembler::aboveEqual, ok); STOP("assert(top <= end)"); --- old/src/hotspot/cpu/x86/templateTable_x86.cpp 2018-02-12 20:05:03.359773003 -0800 +++ new/src/hotspot/cpu/x86/templateTable_x86.cpp 2018-02-12 20:05:03.099774003 -0800 @@ -3903,7 +3903,7 @@ if (UseTLAB) { __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset()))); __ lea(rbx, Address(rax, rdx, Address::times_1)); - __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset()))); + __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_current_end_offset()))); __ jcc(Assembler::above, slow_case); __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx); if (ZeroTLAB) { --- old/src/hotspot/share/gc/shared/collectedHeap.cpp 2018-02-12 20:05:04.183769829 -0800 +++ new/src/hotspot/share/gc/shared/collectedHeap.cpp 2018-02-12 20:05:03.927770814 -0800 @@ -302,7 +302,7 @@ } HeapWord* CollectedHeap::allocate_sampled_object(Thread* thread, size_t size) { - thread->tlab().set_back_actual_end(); + thread->tlab().set_back_allocation_end(); // The tlab could still have space after this sample. return thread->tlab().allocate(size); @@ -315,7 +315,7 @@ HeapWord* obj = NULL; if (should_sample) { // Remember the tlab end to fix up the sampling rate. 
- HeapWord* tlab_old_end = thread->tlab().end(); + HeapWord* tlab_old_end = thread->tlab().current_end(); obj = allocate_sampled_object(thread, size); // If we did allocate in this tlab, sample it. Otherwise, we wait for the --- old/src/hotspot/share/gc/shared/collectedHeap.inline.hpp 2018-02-12 20:05:05.039766532 -0800 +++ new/src/hotspot/share/gc/shared/collectedHeap.inline.hpp 2018-02-12 20:05:04.783767518 -0800 @@ -33,6 +33,7 @@ #include "oops/arrayOop.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiExport.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/thread.inline.hpp" #include "services/lowMemoryDetector.hpp" @@ -154,17 +155,17 @@ check_for_non_bad_heap_word_value(result, size)); assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage"); - THREAD->incr_allocated_bytes(size * HeapWordSize); + int size_in_bytes = size * HeapWordSize; + THREAD->incr_allocated_bytes(size_in_bytes); - AllocTracer::send_allocation_outside_tlab(klass, result, size * HeapWordSize, THREAD); + AllocTracer::send_allocation_outside_tlab(klass, result, size_in_bytes, THREAD); if (UseTLAB) { - THREAD->tlab().handle_sample(THREAD, result, size); + THREAD->tlab().handle_sample(THREAD, result, size_in_bytes); } return result; } - if (!gc_overhead_limit_was_exceeded) { // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support report_java_out_of_memory("Java heap space"); --- old/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp 2018-02-12 20:05:05.907763189 -0800 +++ new/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp 2018-02-12 20:05:05.651764176 -0800 @@ -47,6 +47,16 @@ make_parsable(true); // also retire the TLAB } +size_t ThreadLocalAllocBuffer::remaining() { + if (current_end() == NULL) { + return 0; + } + + // TODO: To be deprecated when FastTLABRefill is deprecated. + update_end_pointers(); + return pointer_delta(reserved_end(), top()); +} + void ThreadLocalAllocBuffer::accumulate_statistics_before_gc() { global_stats()->initialize(); @@ -109,27 +119,29 @@ // Waste accounting should be done in caller as appropriate; see, // for example, clear_before_allocation(). void ThreadLocalAllocBuffer::make_parsable(bool retire, bool zap) { - if (end() != NULL) { + if (current_end() != NULL) { invariants(); if (retire) { myThread()->incr_allocated_bytes(used_bytes()); } - CollectedHeap::fill_with_object(top(), hard_end(), retire && zap); + // TODO: To be deprecated when FastTLABRefill is deprecated. 
+ update_end_pointers(); + CollectedHeap::fill_with_object(top(), reserved_end(), retire && zap); if (retire || ZeroTLAB) { // "Reset" the TLAB set_start(NULL); set_top(NULL); set_pf_top(NULL); - set_end(NULL); - set_actual_end(NULL); - set_slow_path_end(NULL); + set_current_end(NULL); + set_allocation_end(NULL); + set_last_slow_path_end(NULL); } } assert(!(retire || ZeroTLAB) || - (start() == NULL && end() == NULL && top() == NULL && - _actual_end == NULL && _slow_path_end == NULL), + (start() == NULL && current_end() == NULL && top() == NULL && + _allocation_end == NULL && _last_slow_path_end == NULL), "TLAB must be reset"); } @@ -200,9 +212,9 @@ set_start(start); set_top(top); set_pf_top(top); - set_end(end); - set_actual_end(end); - set_slow_path_end(end); + set_current_end(end); + set_allocation_end(end); + set_last_slow_path_end(end); invariants(); _bytes_until_sample = 0; } @@ -327,14 +339,14 @@ } void ThreadLocalAllocBuffer::set_sample_end() { - size_t heap_words_remaining = pointer_delta(_end, _top); + size_t heap_words_remaining = pointer_delta(_current_end, _top); size_t bytes_left = _bytes_until_sample; size_t words_until_sample = bytes_left / HeapWordSize; if (heap_words_remaining > words_until_sample) { HeapWord* new_end = _top + words_until_sample; - set_end(new_end); - set_slow_path_end(new_end); + set_current_end(new_end); + set_last_slow_path_end(new_end); set_bytes_until_sample(0); } else { bytes_left -= heap_words_remaining * HeapWordSize; @@ -361,11 +373,12 @@ set_sample_end(); log_trace(gc, tlab)("TLAB picked next sample: thread: " INTPTR_FORMAT " [id: %2d]" - " start: " INTPTR_FORMAT " top: " INTPTR_FORMAT " end: " INTPTR_FORMAT " actual_end:" - INTPTR_FORMAT " slow_path_end: " INTPTR_FORMAT, + " start: " INTPTR_FORMAT " top: " INTPTR_FORMAT " end: " + INTPTR_FORMAT " allocation_end:" + INTPTR_FORMAT " last_slow_path_end: " INTPTR_FORMAT, p2i(myThread()), myThread()->osthread()->thread_id(), - p2i(start()), p2i(top()), p2i(end()), - p2i(_actual_end), p2i(_slow_path_end)); + p2i(start()), p2i(top()), p2i(current_end()), + p2i(_allocation_end), p2i(_last_slow_path_end)); } Thread* ThreadLocalAllocBuffer::myThread() { @@ -374,51 +387,58 @@ in_bytes(Thread::tlab_start_offset())); } -void ThreadLocalAllocBuffer::set_back_actual_end() { +void ThreadLocalAllocBuffer::set_back_allocation_end() { // Did a fast TLAB refill occur? - if (_slow_path_end != _end) { + if (_last_slow_path_end != _current_end) { // Fix up the actual end to be now the end of this TLAB. - _slow_path_end = _end; - _actual_end = _end; + _last_slow_path_end = _current_end; + _allocation_end = _current_end; } else { - _end = _actual_end; + _current_end = _allocation_end; } } void ThreadLocalAllocBuffer::handle_sample(Thread* thread, HeapWord* result, - size_t size) { + size_t size_in_bytes) { if (!HeapMonitoring::enabled()) { return; } - size_t size_in_bytes = size * HeapWordSize; - if (_bytes_until_sample > size_in_bytes) { - set_bytes_until_sample(_bytes_until_sample - size_in_bytes); - } else { - // Technically this is not exactly right, we probably should remember how many bytes are - // negative probably to then reduce our next sample size. - set_bytes_until_sample(0); - } - - // Should we sample now? 
- if (should_sample()) { + if (_bytes_until_sample < size_in_bytes) { HeapMonitoring::object_alloc_do_sample(thread, reinterpret_cast(result), size_in_bytes); - set_back_actual_end(); - pick_next_sample(); } + + update_tlab_sample_point(size_in_bytes); } -HeapWord* ThreadLocalAllocBuffer::hard_end() { - // Did a fast TLAB refill occur? - if (_slow_path_end != _end) { - // Fix up the actual end to be now the end of this TLAB. - _slow_path_end = _end; - _actual_end = _end; +void ThreadLocalAllocBuffer::update_tlab_sample_point(size_t size_in_bytes) { + if (_bytes_until_sample > size_in_bytes) { + _bytes_until_sample -= size_in_bytes; + return; + } + + // We sampled here, so reset it all and start a new sample point. + set_bytes_until_sample(0); + set_back_allocation_end(); + pick_next_sample(); +} + +void ThreadLocalAllocBuffer::update_end_pointers() { + // Did a fast TLAB refill occur? (This will be deprecated when fast TLAB + // refill disappears). + if (_last_slow_path_end != _current_end) { + // Fix up the last slow path end to be now the end of this TLAB. + _last_slow_path_end = _current_end; + _allocation_end = _current_end; } +} - return _actual_end + alignment_reserve(); +HeapWord* ThreadLocalAllocBuffer::reserved_end() { + assert (_last_slow_path_end == _current_end, + "Have to call update_end_pointers before reserved_end."); + return _allocation_end + alignment_reserve(); } GlobalTLABStats::GlobalTLABStats() : --- old/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp 2018-02-12 20:05:06.771759862 -0800 +++ new/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp 2018-02-12 20:05:06.507760880 -0800 @@ -51,10 +51,9 @@ HeapWord* _start; // address of TLAB HeapWord* _top; // address after last allocation HeapWord* _pf_top; // allocation prefetch watermark - HeapWord* _end; // allocation end (can be the sampling end point or - // the actual TLAB end, excluding alignment_reserve) - HeapWord* _actual_end; // allocation actual_end (actual TLAB end, excluding alignment_reserve) - HeapWord* _slow_path_end; // remember the end in case a fast refill occurs. + HeapWord* _current_end; // allocation end (can be the sampling end point or _allocation_end) + HeapWord* _allocation_end; // end for allocations (actual TLAB end, excluding alignment_reserve) + HeapWord* _last_slow_path_end; // last address for slow_path_end (as opposed to _allocation_end) size_t _desired_size; // desired size (including alignment_reserve) size_t _refill_waste_limit; // hold onto tlab if free() is larger than this @@ -77,9 +76,9 @@ void initialize_statistics(); void set_start(HeapWord* start) { _start = start; } - void set_end(HeapWord* end) { _end = end; } - void set_actual_end(HeapWord* actual_end) { _actual_end = actual_end; } - void set_slow_path_end(HeapWord* slow_path_end) { _slow_path_end = slow_path_end; } + void set_current_end(HeapWord* current_end) { _current_end = current_end; } + void set_allocation_end(HeapWord* ptr) { _allocation_end = ptr; } + void set_last_slow_path_end(HeapWord* ptr) { _last_slow_path_end = ptr; } void set_top(HeapWord* top) { _top = top; } void set_pf_top(HeapWord* pf_top) { _pf_top = pf_top; } void set_desired_size(size_t desired_size) { _desired_size = desired_size; } @@ -91,9 +90,10 @@ static int target_refills() { return _target_refills; } size_t initial_desired_size(); - size_t remaining() { return end() == NULL ? 0 : pointer_delta(hard_end(), top()); } + size_t remaining(); void set_sample_end(); + void update_end_pointers(); // Make parsable and release it. 
void reset(); @@ -101,7 +101,7 @@ // Resize based on amount of allocation, etc. void resize(); - void invariants() const { assert(top() >= start() && top() <= end(), "invalid tlab"); } + void invariants() const { assert(top() >= start() && top() <= current_end(), "invalid tlab"); } void initialize(HeapWord* start, HeapWord* top, HeapWord* end); @@ -131,14 +131,14 @@ static void set_max_size(size_t max_size) { _max_size = max_size; } HeapWord* start() const { return _start; } - HeapWord* end() const { return _end; } + HeapWord* current_end() const { return _current_end; } HeapWord* top() const { return _top; } - HeapWord* hard_end(); + HeapWord* reserved_end(); HeapWord* pf_top() const { return _pf_top; } size_t desired_size() const { return _desired_size; } size_t used() const { return pointer_delta(top(), start()); } size_t used_bytes() const { return pointer_delta(top(), start(), 1); } - size_t free() const { return pointer_delta(end(), top()); } + size_t free() const { return pointer_delta(current_end(), top()); } // Don't discard tlab if remaining space is larger than this. size_t refill_waste_limit() const { return _refill_waste_limit; } @@ -180,15 +180,16 @@ void initialize(); void pick_next_sample(size_t diff = 0); - void set_back_actual_end(); - void handle_sample(Thread* thread, HeapWord* result, size_t size); + void set_back_allocation_end(); + void update_tlab_sample_point(size_t size_in_bytes); + void handle_sample(Thread* thread, HeapWord* result, size_t size_in_bytes); bool should_sample() { return _bytes_until_sample == 0; } static size_t refill_waste_limit_increment() { return TLABWasteIncrement; } // Code generation support static ByteSize start_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _start); } - static ByteSize end_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _end ); } + static ByteSize current_end_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _current_end ); } static ByteSize top_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _top ); } static ByteSize pf_top_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _pf_top ); } static ByteSize size_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _desired_size ); } --- old/src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp 2018-02-12 20:05:07.631756550 -0800 +++ new/src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp 2018-02-12 20:05:07.355757613 -0800 @@ -34,7 +34,7 @@ inline HeapWord* ThreadLocalAllocBuffer::allocate(size_t size) { invariants(); HeapWord* obj = top(); - if (pointer_delta(end(), obj) >= size) { + if (pointer_delta(current_end(), obj) >= size) { // successful thread-local allocation #ifdef ASSERT // Skip mangling the space corresponding to the object header to --- old/src/hotspot/share/jvmci/vmStructs_jvmci.cpp 2018-02-12 20:05:08.427753485 -0800 +++ new/src/hotspot/share/jvmci/vmStructs_jvmci.cpp 2018-02-12 20:05:08.171754471 -0800 @@ -321,7 +321,7 @@ \ nonstatic_field(ThreadLocalAllocBuffer, _start, HeapWord*) \ nonstatic_field(ThreadLocalAllocBuffer, _top, HeapWord*) \ - nonstatic_field(ThreadLocalAllocBuffer, _end, HeapWord*) \ + nonstatic_field(ThreadLocalAllocBuffer, _current_end, HeapWord*) \ nonstatic_field(ThreadLocalAllocBuffer, _pf_top, HeapWord*) \ nonstatic_field(ThreadLocalAllocBuffer, _desired_size, size_t) \ nonstatic_field(ThreadLocalAllocBuffer, _refill_waste_limit, size_t) \ --- old/src/hotspot/share/opto/macro.cpp 2018-02-12 20:05:09.307750096 -0800 +++ new/src/hotspot/share/opto/macro.cpp 2018-02-12 
20:05:09.043751113 -0800 @@ -1241,9 +1241,9 @@ if (UseTLAB) { // Private allocation: load from TLS Node* thread = transform_later(new ThreadLocalNode()); int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset()); - int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset()); + int tlab_current_end_offset = in_bytes(JavaThread::tlab_current_end_offset()); eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset); - eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_end_offset); + eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_current_end_offset); } else { // Shared allocation: load from globals CollectedHeap* ch = Universe::heap(); address top_adr = (address)ch->top_addr(); --- old/src/hotspot/share/prims/jvmti.xml 2018-02-12 20:05:10.175746753 -0800 +++ new/src/hotspot/share/prims/jvmti.xml 2018-02-12 20:05:09.907747785 -0800 @@ -11539,42 +11539,20 @@ - - - - jvmtiFrameInfo + + + + jvmtiStackInfo - Pointer to the call frames. - - - - The number of frames for the trace. + Pointer to the stack information. - + The size of the object allocation. - - The thread id number. - - - - - - - jvmtiStackTrace - - - The array with the various stack traces. - - - - - - Number of traces pointed by the array . - + The thread id of the object allocation. @@ -11627,7 +11605,7 @@ rate requested and will fill internal data structures with heap allocation samples. The samples are obtained via the , , , - or functions. + or functions. new @@ -11664,12 +11642,12 @@ Stop the heap sampler in the JVM. Any sample obtained during sampling is still available via the , , , - or functions. + or functions. Stopping the heap sampler resets internal traces and counters. Therefore stopping the sampler frees any internal trace samples, any subsequent call to the , , , - or functions will return no traces. + or functions will return no traces. new @@ -11681,15 +11659,15 @@ - - Get Live Traces + + Get Object Allocation Traces Get Live Heap Sampled traces. The fields of the structure are filled in with details of the specified sampled allocation. This methods call full GC and can be costly. Use with care as it can affect performance. For - continuous profiling, perhaps prefer GetCachedTraces, which returns the live traces at the last - full GC point. + continuous profiling, perhaps prefer GetCachedObjectAllocTraces, which returns the live + traces at the last full GC point. This method can be called at any time but if the sampler is not enabled, via , it returns no traces. @@ -11699,10 +11677,27 @@ - - jvmtiStackTraces + + + jvmtiAllocTraceInfo + - The stack trace data structure to be filled. + On return, this buffer is filled with stack information for each live object. + The number of records is determined + by . +

+ Note that this buffer is allocated to include the + buffers pointed to by , which also + include the buffers pointed by + . + + All these buffers must not be separately deallocated. + + + + + + The number of traces allocated. @@ -11724,10 +11719,27 @@ - - jvmtiStackTraces + + + jvmtiAllocTraceInfo + + + On return, this buffer is filled with stack information for each live object. + The number of records is determined + by . +

+ Note that this buffer is allocated to include the + buffers pointed to by , which also + include the buffers pointed by + . + + All these buffers must not be separately deallocated. + + + + - The stack trace data structure to be filled. + The number of traces allocated. @@ -11749,10 +11761,27 @@ - - jvmtiStackTraces + + + jvmtiAllocTraceInfo + + + On return, this buffer is filled with stack information for each live object. + The number of records is determined + by . +

+ Note that this buffer is allocated to include the + buffers pointed to by , which also + include the buffers pointed by + . + + All these buffers must not be separately deallocated. + + + + - The stack trace data structure to be filled. + The number of traces allocated. @@ -11760,8 +11789,8 @@ - - Get Live Traces + + Get Cached Object Allocated Traces Get the cached sampled traces: the traces are the ones that were collected during the last full GC. The fields of the structure are filled in with @@ -11775,31 +11804,27 @@ - - jvmtiStackTraces + + + jvmtiAllocTraceInfo + - The stack trace data structure to be filled. + On return, this buffer is filled with stack information for each live object. + The number of records is determined + by . +

+ Note that this buffer is allocated to include the + buffers pointed to by , which also + include the buffers pointed by + . + + All these buffers must not be separately deallocated. - - - - - - - Release traces provided by the heap monitoring - - Release traces provided by any of the trace retrieval methods. - - new - - - - - - jvmtiStackTraces + + - The stack trace data structure to be released. + The number of traces allocated. @@ -11807,7 +11832,7 @@ - + Get the heap sampling statistics Returns a to understand the heap sampling behavior and current --- old/src/hotspot/share/prims/jvmtiEnv.cpp 2018-02-12 20:05:11.307742394 -0800 +++ new/src/hotspot/share/prims/jvmtiEnv.cpp 2018-02-12 20:05:11.035743441 -0800 @@ -2028,62 +2028,56 @@ // Provoke a GC and get the currently live sampled allocations. jvmtiError -JvmtiEnv::GetLiveTraces(jvmtiStackTraces* stack_traces) { +JvmtiEnv::GetObjectAllocTraces(jvmtiAllocTraceInfo** stack_traces, + jint* trace_counter_ptr) { ForceGarbageCollection(); HeapThreadTransition htt(Thread::current()); if (stack_traces == NULL) { return JVMTI_ERROR_ILLEGAL_ARGUMENT; } - HeapMonitoring::get_live_traces(stack_traces); + HeapMonitoring::get_live_traces(this, stack_traces, trace_counter_ptr); return JVMTI_ERROR_NONE; } /* end GetLiveTraces */ // Get the recently garbage collected allocations. jvmtiError -JvmtiEnv::GetGarbageTraces(jvmtiStackTraces* stack_traces) { +JvmtiEnv::GetGarbageTraces(jvmtiAllocTraceInfo** stack_traces, + jint* trace_counter_ptr) { HeapThreadTransition htt(Thread::current()); if (stack_traces == NULL) { return JVMTI_ERROR_ILLEGAL_ARGUMENT; } - HeapMonitoring::get_garbage_traces(stack_traces); + HeapMonitoring::get_garbage_traces(this, stack_traces, trace_counter_ptr); return JVMTI_ERROR_NONE; } /* end GetGarbageTraces */ // Get the frequently garbage collected traces. jvmtiError -JvmtiEnv::GetFrequentGarbageTraces(jvmtiStackTraces* stack_traces) { +JvmtiEnv::GetFrequentGarbageTraces(jvmtiAllocTraceInfo** stack_traces, + jint* trace_counter_ptr) { HeapThreadTransition htt(Thread::current()); if (stack_traces == NULL) { return JVMTI_ERROR_ILLEGAL_ARGUMENT; } - HeapMonitoring::get_frequent_garbage_traces(stack_traces); + HeapMonitoring::get_frequent_garbage_traces(this, stack_traces, trace_counter_ptr); return JVMTI_ERROR_NONE; } /* end GetFrequentGarbageTraces */ // Get the traces that were garbage collected in the last full GC. jvmtiError -JvmtiEnv::GetCachedTraces(jvmtiStackTraces* stack_traces) { +JvmtiEnv::GetCachedObjectAllocTraces(jvmtiAllocTraceInfo** stack_traces, + jint* trace_counter_ptr) { HeapThreadTransition htt(Thread::current()); if (stack_traces == NULL) { return JVMTI_ERROR_ILLEGAL_ARGUMENT; } - HeapMonitoring::get_cached_traces(stack_traces); + HeapMonitoring::get_cached_traces(this, stack_traces, trace_counter_ptr); return JVMTI_ERROR_NONE; -} /* end GetCachedTraces */ - -// Release sampled traces. -jvmtiError -JvmtiEnv::ReleaseTraces(jvmtiStackTraces* stack_traces) { - if (stack_traces == NULL) { - return JVMTI_ERROR_NONE; - } - HeapMonitoring::release_traces(stack_traces); - return JVMTI_ERROR_NONE; -} /* end ReleaseTraces */ +} /* end GetObjectAllocTraces */ // Get the heap sampling statistics. 
jvmtiError --- old/src/hotspot/share/runtime/heapMonitoring.cpp 2018-02-12 20:05:12.179739036 -0800 +++ new/src/hotspot/share/runtime/heapMonitoring.cpp 2018-02-12 20:05:11.915740053 -0800 @@ -26,6 +26,7 @@ #include "gc/shared/collectedHeap.hpp" #include "memory/universe.hpp" +#include "prims/jvmtiEnvBase.hpp" #include "runtime/heapMonitoring.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/vframe.hpp" @@ -35,17 +36,17 @@ // Internal data structure representing traces, used when object has been GC'd. class StackTraceData : public CHeapObj { private: - jvmtiStackTrace* _trace; + jvmtiAllocTraceInfo* _trace; int _references; public: - StackTraceData(jvmtiStackTrace* t) : _trace(t), _references(0) {} + StackTraceData(jvmtiAllocTraceInfo* t) : _trace(t), _references(0) {} void increment_reference_count() { _references++; } - jvmtiStackTrace* get_trace() const { + jvmtiAllocTraceInfo* get_trace() const { return _trace; } @@ -57,7 +58,9 @@ data->_references--; if (data->_references == 0) { if (data->_trace != NULL) { - FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->_trace->frames); + jvmtiStackInfo* stack_info = data->_trace->stack_info; + FREE_C_HEAP_ARRAY(jvmtiFrameInfo, stack_info->frame_buffer); + FREE_C_HEAP_OBJ(stack_info); FREE_C_HEAP_OBJ(data->_trace); } delete data; @@ -73,7 +76,7 @@ oop _obj; public: - StackTraceDataWithOop(jvmtiStackTrace* t, oop o) : StackTraceData(t) { + StackTraceDataWithOop(jvmtiAllocTraceInfo* t, oop o) : StackTraceData(t) { store_oop(o); } @@ -218,23 +221,31 @@ public: // The function that gets called to add a trace to the list of // traces we are maintaining. - void add_trace(jvmtiStackTrace* trace, oop o); + void add_trace(jvmtiAllocTraceInfo* trace, oop o); // The function that gets called by the client to retrieve the list - // of stack traces. Passes a jvmtiStackTraces which will get mutated. - void get_all_stack_traces(jvmtiStackTraces* traces); + // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated. + void get_all_stack_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr); // The function that gets called by the client to retrieve the list - // of stack traces. Passes a jvmtiStackTraces which will get mutated. - void get_garbage_stack_traces(jvmtiStackTraces* traces); + // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated. + void get_garbage_stack_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr); // The function that gets called by the client to retrieve the list - // of stack traces. Passes a jvmtiStackTraces which will get mutated. - void get_frequent_garbage_stack_traces(jvmtiStackTraces* traces); + // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated. + void get_frequent_garbage_stack_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr); // The function that gets called by the client to retrieve the list - // of stack traces. Passes a jvmtiStackTraces which will get mutated. - void get_cached_stack_traces(jvmtiStackTraces* traces); + // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated. + void get_cached_stack_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr); // Executes whenever weak references are traversed. is_alive tells // you if the given oop is still reachable and live. 
@@ -271,6 +282,7 @@ bool initialized() { return OrderAccess::load_acquire(&_initialized) != 0; + return _initialized; } private: @@ -326,12 +338,11 @@ int _size; }; - // Copies from StackTraceData to jvmtiStackTrace. - bool deep_copy(jvmtiStackTrace* to, const StackTraceData* from); - // Creates a deep copy of the list of StackTraceData. - void copy_stack_traces(const StackTraceDataCopier &copier, - jvmtiStackTraces* traces); + void copy_stack_traces(JvmtiEnv* env, + const StackTraceDataCopier &copier, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr); void store_garbage_trace(const StackTraceDataWithOop &trace); @@ -340,6 +351,20 @@ void reset(); void allocate_storage(int max_gc_storage); + + int calculate_frame_count(const StackTraceDataCopier &copier); + int calculate_info_count(const StackTraceDataCopier &copier); + + bool copy_frame(const StackTraceData* stack_trace_data, + jvmtiAllocTraceInfo* current_alloc_traces, + jvmtiStackInfo* current_stack_info, + jvmtiFrameInfo* current_frame_info); + + // Returns frame copy success. Failure can result when there is no longer + // enough memory. + bool copy_frames(const StackTraceDataCopier& copier, int info_count, + unsigned char* start, + unsigned char* end); }; StackTraceStorage* StackTraceStorage::internal_storage; @@ -353,10 +378,15 @@ uint64_t HeapMonitoring::_rnd; StackTraceStorage::StackTraceStorage() { + MutexLocker mu(HeapMonitorStorage_lock); reset(); } void StackTraceStorage::reset() { + assert(HeapMonitorStorage_lock->owned_by_self() + || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), + "This should not be accessed concurrently"); + _allocated_traces = NULL; _traces_on_last_full_gc = NULL; _recent_garbage_traces = NULL; @@ -415,6 +445,10 @@ } void StackTraceStorage::allocate_storage(int max_gc_storage) { + assert(HeapMonitorStorage_lock->owned_by_self() + || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), + "This should not be accessed concurrently"); + // In case multiple threads got locked and then 1 by 1 got through. if (initialized()) { return; @@ -433,7 +467,7 @@ OrderAccess::release_store(&_initialized, 1); } -void StackTraceStorage::add_trace(jvmtiStackTrace* trace, oop o) { +void StackTraceStorage::add_trace(jvmtiAllocTraceInfo* trace, oop o) { MutexLocker mu(HeapMonitorStorage_lock); // Last minute check on initialization here in case: // Between the moment object_alloc_do_sample's check for initialization @@ -441,7 +475,7 @@ if (initialized()) { StackTraceDataWithOop new_data(trace, o); _stats.sample_count++; - _stats.stack_depth_accumulation += trace->frame_count; + _stats.stack_depth_accumulation += trace->stack_info->frame_count; _allocated_traces->append(new_data); } } @@ -491,112 +525,201 @@ log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" INT64_FORMAT ")", count); } -bool StackTraceStorage::deep_copy(jvmtiStackTrace* to, - const StackTraceData* from) { - const jvmtiStackTrace* src = from->get_trace(); - *to = *src; - - to->frames = - NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal); - - if (to->frames == NULL) { - return false; - } - - memcpy(to->frames, - src->frames, - sizeof(jvmtiFrameInfo) * src->frame_count); - return true; -} - // Called by the outside world; returns a copy of the stack traces // (because we could be replacing them as the user handles them). // The array is secretly null-terminated (to make it easier to reclaim). 
-void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces* traces) { +void StackTraceStorage::get_all_stack_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr) { MutexLocker mu(HeapMonitorStorage_lock); if (!_allocated_traces) { - traces->stack_traces = NULL; - traces->trace_count = 0; + *traces = NULL; + *trace_counter_ptr = 0; return; } LiveStackTraceDataCopier copier(_allocated_traces); - copy_stack_traces(copier, traces); + copy_stack_traces(env, copier, traces, trace_counter_ptr); } // See comment on get_all_stack_traces -void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces* traces) { +void StackTraceStorage::get_garbage_stack_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr) { MutexLocker mu(HeapMonitorStorage_lock); if (!_recent_garbage_traces) { - traces->stack_traces = NULL; - traces->trace_count = 0; + *traces = NULL; + *trace_counter_ptr = 0; return; } GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(), _recent_garbage_traces->size()); - copy_stack_traces(copier, traces); + copy_stack_traces(env, copier, traces, trace_counter_ptr); } // See comment on get_all_stack_traces void StackTraceStorage::get_frequent_garbage_stack_traces( - jvmtiStackTraces* traces) { + JvmtiEnv* env, jvmtiAllocTraceInfo** traces, jint* trace_counter_ptr) { MutexLocker mu(HeapMonitorStorage_lock); if (!_frequent_garbage_traces) { - traces->stack_traces = NULL; - traces->trace_count = 0; + *traces = NULL; + *trace_counter_ptr = 0; return; } GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(), _frequent_garbage_traces->size()); - copy_stack_traces(copier, traces); + copy_stack_traces(env, copier, traces, trace_counter_ptr); } // See comment on get_all_stack_traces -void StackTraceStorage::get_cached_stack_traces(jvmtiStackTraces* traces) { +void StackTraceStorage::get_cached_stack_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr) { MutexLocker mu(HeapMonitorStorage_lock); if (!_traces_on_last_full_gc) { - traces->stack_traces = NULL; - traces->trace_count = 0; + *traces = NULL; + *trace_counter_ptr = 0; return; } LiveStackTraceDataCopier copier(_traces_on_last_full_gc); - copy_stack_traces(copier, traces); + copy_stack_traces(env, copier, traces, trace_counter_ptr); } -void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier, - jvmtiStackTraces* traces) { +int StackTraceStorage::calculate_frame_count(const StackTraceDataCopier &copier) { int len = copier.size(); - // Create a new array to store the StackTraceData objects. - // + 1 for a NULL at the end. - jvmtiStackTrace* t = - NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal); - if (t == NULL) { - traces->stack_traces = NULL; - traces->trace_count = 0; - return; + // Walk the traces first to find the size of the frames as well. + int frame_total = 0; + + for (int i = 0; i < len; i++) { + const StackTraceData* stack_trace = copier.get(i); + + if (stack_trace != NULL) { + jvmtiAllocTraceInfo* trace = stack_trace->get_trace(); + jvmtiStackInfo* stack_info = trace->stack_info; + frame_total += stack_info->frame_count; + } } - // +1 to have a NULL at the end of the array. - memset(t, 0, (len + 1) * sizeof(*t)); - // Copy the StackTraceData objects into the new array. 
- int trace_count = 0; + return frame_total; +} + +int StackTraceStorage::calculate_info_count(const StackTraceDataCopier &copier) { + int len = copier.size(); + + int info_total = 0; + for (int i = 0; i < len; i++) { const StackTraceData* stack_trace = copier.get(i); + if (stack_trace != NULL) { - jvmtiStackTrace* to = &t[trace_count]; - if (!deep_copy(to, stack_trace)) { - continue; + // TODO: merge this with the method above. + info_total++; + } + } + + return info_total; +} + +// Method to test if the data structure would fit between the src address and +// the end address. +template +static bool next_ptr_less_or_equal(T src, U* end) { + return (src + 1) <= reinterpret_cast(end); +} + +bool StackTraceStorage::copy_frame(const StackTraceData* stack_trace_data, + jvmtiAllocTraceInfo* current_alloc_trace, + jvmtiStackInfo* current_stack_info, + jvmtiFrameInfo* current_frame_info) { + jvmtiAllocTraceInfo* trace = stack_trace_data->get_trace(); + jvmtiStackInfo* stack_info = trace->stack_info; + int frame_count = stack_info->frame_count; + + memcpy(current_alloc_trace, trace, sizeof(*trace)); + + current_alloc_trace->stack_info = current_stack_info; + memcpy(current_stack_info, stack_info, sizeof(*stack_info)); + + current_stack_info->frame_buffer = current_frame_info; + memcpy(current_frame_info, stack_info->frame_buffer, + sizeof(jvmtiFrameInfo) * frame_count); + return true; +} + +bool StackTraceStorage::copy_frames(const StackTraceDataCopier& copier, + int info_count, + unsigned char* start, + unsigned char* end) { + jvmtiAllocTraceInfo* start_alloc_trace = reinterpret_cast(start); + jvmtiStackInfo* start_stack_info = reinterpret_cast(start_alloc_trace + info_count); + jvmtiFrameInfo* start_frame_info = reinterpret_cast(start_stack_info + info_count); + + jvmtiAllocTraceInfo* current_alloc_trace = start_alloc_trace; + jvmtiStackInfo* current_stack_info = start_stack_info; + jvmtiFrameInfo* current_frame_info = start_frame_info; + + for (int i = 0; i < info_count; i++) { + assert(next_ptr_less_or_equal(current_alloc_trace, start_stack_info), + "jvmtiAllocTraceInfo would write over jvmtiStackInfos."); + assert(next_ptr_less_or_equal(current_stack_info, start_frame_info), + "jvmtiStackInfo would write over jvmtiFrameInfos."); + + assert(next_ptr_less_or_equal(current_frame_info, end), + "jvmtiFrameInfo would write over the end of the buffer."); + + const StackTraceData* stack_trace_data = copier.get(i); + if (stack_trace_data != NULL) { + if (!copy_frame(stack_trace_data, current_alloc_trace, + current_stack_info, current_frame_info)) { + return false; } - trace_count++; + + current_frame_info += current_stack_info->frame_count; + current_stack_info++; + current_alloc_trace++; } } - traces->stack_traces = t; - traces->trace_count = trace_count; + return true; +} + +void StackTraceStorage::copy_stack_traces(JvmtiEnv* env, + const StackTraceDataCopier& copier, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr) { + *traces = NULL; + *trace_counter_ptr = 0; + + int frame_total = calculate_frame_count(copier); + int len = calculate_info_count(copier); + + // Allocate the whole stacktraces in one bloc to simplify freeing. 
+ size_t total_size = len * sizeof(jvmtiAllocTraceInfo) + + len * sizeof(jvmtiStackInfo) + + frame_total * sizeof(jvmtiFrameInfo); + + unsigned char* buffer = NULL; + jvmtiAllocTraceInfo* result = NULL; + JvmtiEnvBase* env_base = reinterpret_cast(env); + env_base->allocate(total_size, &buffer); + + if (buffer == NULL) { + return; + } + + bool success = copy_frames(copier, len, buffer, buffer + total_size); + + if (!success) { + env_base->deallocate(buffer); + return; + } + + *trace_counter_ptr = len; + *traces = reinterpret_cast(buffer); } void StackTraceStorage::store_garbage_trace(const StackTraceDataWithOop &trace) { @@ -615,8 +738,12 @@ _stats.garbage_collected_samples++; } -void HeapMonitoring::get_live_traces(jvmtiStackTraces* traces) { - StackTraceStorage::storage()->get_all_stack_traces(traces); +void HeapMonitoring::get_live_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr) { + StackTraceStorage::storage()->get_all_stack_traces(env, + traces, + trace_counter_ptr); } void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats* stats) { @@ -625,30 +752,27 @@ *stats = internal_stats; } -void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces* traces) { - StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces); -} - -void HeapMonitoring::get_garbage_traces(jvmtiStackTraces* traces) { - StackTraceStorage::storage()->get_garbage_stack_traces(traces); -} - -void HeapMonitoring::get_cached_traces(jvmtiStackTraces* traces) { - StackTraceStorage::storage()->get_cached_stack_traces(traces); -} - -void HeapMonitoring::release_traces(jvmtiStackTraces* traces) { - jint trace_count = traces->trace_count; - jvmtiStackTrace* stack_traces = traces->stack_traces; - - for (jint i = 0; i < trace_count; i++) { - jvmtiStackTrace* current_trace = stack_traces + i; - FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames); - } - - FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces); - traces->trace_count = 0; - traces->stack_traces = NULL; +void HeapMonitoring::get_frequent_garbage_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr) { + StackTraceStorage::storage()->get_frequent_garbage_stack_traces( + env, traces, trace_counter_ptr); +} + +void HeapMonitoring::get_garbage_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr) { + StackTraceStorage::storage()->get_garbage_stack_traces(env, + traces, + trace_counter_ptr); +} + +void HeapMonitoring::get_cached_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr) { + StackTraceStorage::storage()->get_cached_stack_traces(env, + traces, + trace_counter_ptr); } // Invoked by the GC to clean up old stack traces and remove old arrays @@ -731,29 +855,37 @@ StackTraceStorage::storage()->accumulate_sample_rate(rate); } -void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, intx byte_size) { +void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, size_t byte_size) { JavaThread* thread = static_cast(t); if (StackTraceStorage::storage()->initialized()) { assert(t->is_Java_thread(), "non-Java thread passed to do_sample"); JavaThread* thread = static_cast(t); - jvmtiStackTrace* trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal); + jvmtiAllocTraceInfo* trace = NEW_C_HEAP_OBJ(jvmtiAllocTraceInfo, mtInternal); + if (trace == NULL) { + return; + } + + jvmtiStackInfo* stack_info = NEW_C_HEAP_OBJ(jvmtiStackInfo, mtInternal); if (trace == NULL) { + FREE_C_HEAP_OBJ(trace); return; } + trace->stack_info 
= stack_info; jvmtiFrameInfo* frames = NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal); if (frames == NULL) { + FREE_C_HEAP_OBJ(stack_info); FREE_C_HEAP_OBJ(trace); return; } + stack_info->frame_buffer = frames; + stack_info->frame_count = 0; - trace->frames = frames; trace->thread_id = SharedRuntime::get_java_tid(thread); trace->size = byte_size; - trace->frame_count = 0; if (thread->has_last_Java_frame()) { // just to be safe vframeStream vfst(thread, true); @@ -766,17 +898,18 @@ vfst.next(); } - trace->frame_count = count; + stack_info->frame_count = count; } - if (trace->frame_count> 0) { + if (stack_info->frame_count > 0) { // Success! StackTraceStorage::storage()->add_trace(trace, o); return; } // Failure! - FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames); + FREE_C_HEAP_ARRAY(jvmtiFrameInfo, frames); + FREE_C_HEAP_OBJ(stack_info); FREE_C_HEAP_OBJ(trace); } } --- old/src/hotspot/share/runtime/heapMonitoring.hpp 2018-02-12 20:05:13.055735663 -0800 +++ new/src/hotspot/share/runtime/heapMonitoring.hpp 2018-02-12 20:05:12.799736648 -0800 @@ -78,18 +78,25 @@ // initialize_profiling method. static void pick_next_sample(size_t* ptr); - // Get live/garbage traces and provide a method to release the traces. - static void get_live_traces(jvmtiStackTraces* stack_traces); - static void get_garbage_traces(jvmtiStackTraces* stack_traces); - static void get_frequent_garbage_traces(jvmtiStackTraces* stack_traces); - static void get_cached_traces(jvmtiStackTraces* stack_traces); - static void release_traces(jvmtiStackTraces* trace_info); + // Get live/cached/garbage traces. + static void get_live_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** stack_traces, + jint* trace_counter_ptr); + static void get_garbage_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** stack_traces, + jint* trace_counter_ptr); + static void get_frequent_garbage_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** stack_traces, + jint* trace_counter_ptr); + static void get_cached_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** stack_traces, + jint* trace_counter_ptr); static void get_sampling_statistics(jvmtiHeapSamplingStats* stats); static void stop_profiling(); // Called when o is to be sampled from a given thread and a given size. - static void object_alloc_do_sample(Thread* t, oopDesc* o, intx size_in_bytes); + static void object_alloc_do_sample(Thread* t, oopDesc* o, size_t size_in_bytes); // Called to clean up oops that have been saved by our sampling function, // but which no longer have other references in the heap. 
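Reviewer note on the consumer side of the getters declared above: per the jvmti.xml changes earlier in this patch, each Get*Traces call returns a single allocation that embeds the jvmtiAllocTraceInfo records, their jvmtiStackInfo records and the frame buffers, so the agent releases everything with one Deallocate call. A hedged C sketch of agent usage under this proposal (it mirrors what libHeapMonitor.c further below does; jvmtiAllocTraceInfo and GetObjectAllocTraces exist only with this patch applied, error handling is trimmed, and the printed fields are chosen for illustration):

#include <stdio.h>
#include <jvmti.h>   /* with this patch's additions applied */

static void dump_sampled_allocations(jvmtiEnv* jvmti) {
  jvmtiAllocTraceInfo* traces = NULL;
  jint trace_count = 0;
  jint i;

  if ((*jvmti)->GetObjectAllocTraces(jvmti, &traces, &trace_count) != JVMTI_ERROR_NONE) {
    return;
  }

  for (i = 0; i < trace_count; i++) {
    jvmtiStackInfo* stack_info = traces[i].stack_info;
    fprintf(stderr, "sample: size=%ld thread=%ld frames=%d\n",
            (long) traces[i].size, (long) traces[i].thread_id,
            (int) stack_info->frame_count);
  }

  /* One call frees the traces, stack infos and frame buffers together. */
  (*jvmti)->Deallocate(jvmti, (unsigned char*) traces);
}
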
--- old/src/hotspot/share/runtime/thread.hpp 2018-02-12 20:05:13.923732320 -0800 +++ new/src/hotspot/share/runtime/thread.hpp 2018-02-12 20:05:13.663733321 -0800 @@ -678,7 +678,7 @@ static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); } TLAB_FIELD_OFFSET(start) - TLAB_FIELD_OFFSET(end) + TLAB_FIELD_OFFSET(current_end) TLAB_FIELD_OFFSET(top) TLAB_FIELD_OFFSET(pf_top) TLAB_FIELD_OFFSET(size) // desired_size --- old/src/hotspot/share/runtime/vmStructs.cpp 2018-02-12 20:05:14.735729193 -0800 +++ new/src/hotspot/share/runtime/vmStructs.cpp 2018-02-12 20:05:14.487730148 -0800 @@ -533,7 +533,7 @@ \ nonstatic_field(ThreadLocalAllocBuffer, _start, HeapWord*) \ nonstatic_field(ThreadLocalAllocBuffer, _top, HeapWord*) \ - nonstatic_field(ThreadLocalAllocBuffer, _end, HeapWord*) \ + nonstatic_field(ThreadLocalAllocBuffer, _current_end, HeapWord*) \ nonstatic_field(ThreadLocalAllocBuffer, _pf_top, HeapWord*) \ nonstatic_field(ThreadLocalAllocBuffer, _desired_size, size_t) \ nonstatic_field(ThreadLocalAllocBuffer, _refill_waste_limit, size_t) \ --- old/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java 2018-02-12 20:05:15.603725851 -0800 +++ new/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java 2018-02-12 20:05:15.363726775 -0800 @@ -53,8 +53,8 @@ int depth = depths[depthIdx]; HeapMonitor.enableSampling(); - // Do the runner 10 times to ensure the stack is really sampled. - runner(10, depth); + // Do the runner 3 times to ensure the stack is really sampled. + runner(3, depth); // baseDepth represents the helper method depth: main, runner, HeapMonitor.allocate, // and HeapMonitor.actuallyAllocate. @@ -65,7 +65,7 @@ // 3% error should be close enough. if (errorPercentage > 3) { - throw new RuntimeException("Stack depth average over 5% for depth " + depth + " : " + averageDepth + " , error: " + errorPercentage); + throw new RuntimeException("Stack depth average over 3% for depth " + depth + " : " + averageDepth + " , error: " + errorPercentage); } HeapMonitor.disableSampling(); @@ -75,8 +75,8 @@ // Last test is 1024, which is the current maximum. HeapMonitor.enableSampling(); final int maximumDepth = 1024; - // Do the runner 10 times to ensure the stack is really sampled. - runner(10, maximumDepth); + // Do the runner 3 times to ensure the stack is really sampled. + runner(3, maximumDepth); // Because of the extra frames, we should be at (maximumDepth + a few frames). Due to the // maximum depth allowed, we hit it and so should still be at an average of 1024. double averageDepth = getAverageStackDepth(); @@ -85,7 +85,7 @@ // 3% error should be close enough. 
if (errorPercentage > 3) { - throw new RuntimeException("Stack depth average over 5% for depth 1024 : " + averageDepth + " , error: " + errorPercentage); + throw new RuntimeException("Stack depth average over 3% for depth 1024 : " + averageDepth + " , error: " + errorPercentage); } } } --- old/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c 2018-02-12 20:05:16.399722786 -0800 +++ new/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c 2018-02-12 20:05:16.175723648 -0800 @@ -178,20 +178,24 @@ } ExpectedContentFrame; static jboolean check_sample_content(JNIEnv *env, - jvmtiStackTrace *trace, + jvmtiAllocTraceInfo* trace, ExpectedContentFrame *expected, int expected_count, int print_out_comparisons) { int i; - if (expected_count > trace->frame_count) { + jvmtiStackInfo* stack_info = trace->stack_info; + + if (expected_count > stack_info->frame_count) { return FALSE; } + jvmtiFrameInfo* frames = stack_info->frame_buffer; + for (i = 0; i < expected_count; i++) { // Get basic information out of the trace. - int bci = trace->frames[i].location; - jmethodID methodid = trace->frames[i].method; + int bci = frames[i].location; + jmethodID methodid = frames[i].method; char *name = NULL, *signature = NULL, *file_name = NULL; if (bci < 0) { @@ -228,12 +232,12 @@ } if (print_out_comparisons) { - fprintf(stderr, "Comparing:\n"); - fprintf(stderr, "\tNames: %s and %s\n", name, expected[i].name); - fprintf(stderr, "\tSignatures: %s and %s\n", signature, expected[i].signature); - fprintf(stderr, "\tFile name: %s and %s\n", file_name, expected[i].file_name); - fprintf(stderr, "\tLines: %d and %d\n", line_number, expected[i].line_number); - fprintf(stderr, "\tResult is %d\n", + fprintf(stderr, "\tComparing:\n"); + fprintf(stderr, "\t\tNames: %s and %s\n", name, expected[i].name); + fprintf(stderr, "\t\tSignatures: %s and %s\n", signature, expected[i].signature); + fprintf(stderr, "\t\tFile name: %s and %s\n", file_name, expected[i].file_name); + fprintf(stderr, "\t\tLines: %d and %d\n", line_number, expected[i].line_number); + fprintf(stderr, "\t\tResult is %d\n", (strcmp(name, expected[i].name) || strcmp(signature, expected[i].signature) || strcmp(file_name, expected[i].file_name) || @@ -251,7 +255,7 @@ return TRUE; } -static jboolean compare_samples(JNIEnv* env, jvmtiStackTrace* traces, +static jboolean compare_samples(JNIEnv* env, jvmtiAllocTraceInfo* traces, int trace_count, ExpectedContentFrame* expected_content, size_t size, @@ -259,8 +263,12 @@ // We expect the code to record correctly the bci, retrieve the line // number, have the right method and the class name of the first frames. int i; + if (print_out_comparisons) { + fprintf(stderr, "\tNumber of traces: %d\n", print_out_comparisons); + } + for (i = 0; i < trace_count; i++) { - jvmtiStackTrace *trace = traces + i; + jvmtiAllocTraceInfo* trace = traces + i; if (check_sample_content(env, trace, expected_content, size, print_out_comparisons)) { // At least one frame matched what we were looking for. 
@@ -275,18 +283,23 @@ check_samples(JNIEnv* env, ExpectedContentFrame* expected, size_t size, - jvmtiError (*const get_traces)(jvmtiEnv*, jvmtiStackTraces*), + jvmtiError (*const get_traces)(jvmtiEnv*, jvmtiAllocTraceInfo**, jint*), int print_out_comparisons) { - jvmtiStackTraces traces; - jvmtiError error = get_traces(jvmti, &traces); + jvmtiAllocTraceInfo *traces; + jint trace_counter; + jvmtiError error = get_traces(jvmti, &traces, &trace_counter); if (error != JVMTI_ERROR_NONE) { return FALSE; } - int result = compare_samples(env, traces.stack_traces, traces.trace_count, + int result = compare_samples(env, traces, trace_counter, expected, size, print_out_comparisons); - (*jvmti)->ReleaseTraces(jvmti, &traces); + + if ((*jvmti)->Deallocate(jvmti, (unsigned char*) traces) != JVMTI_ERROR_NONE) { + return FALSE; + } + return result; } @@ -294,7 +307,7 @@ ExpectedContentFrame* expected, size_t size, int print_out_comparisons) { - return check_samples(env, expected, size, (*jvmti)->GetLiveTraces, + return check_samples(env, expected, size, (*jvmti)->GetObjectAllocTraces, print_out_comparisons); } @@ -538,28 +551,28 @@ return FALSE; } - if (check_capability_error((*jvmti)->ReleaseTraces(jvmti, NULL), - "Release Traces")) { - return FALSE; - } - if (check_capability_error((*jvmti)->GetHeapSamplingStats(jvmti, NULL), "Get Heap Sampling Stats")) { return FALSE; } - if (check_capability_error((*jvmti)->GetGarbageTraces(jvmti, NULL), + if (check_capability_error((*jvmti)->GetGarbageTraces(jvmti, NULL, NULL), "Get Garbage Traces")) { return FALSE; } - if (check_capability_error((*jvmti)->GetFrequentGarbageTraces(jvmti, NULL), + if (check_capability_error((*jvmti)->GetFrequentGarbageTraces(jvmti, NULL, NULL), "Get Frequent Garbage Traces")) { return FALSE; } - if (check_capability_error((*jvmti)->GetLiveTraces(jvmti, NULL), - "Get Live Traces")) { + if (check_capability_error((*jvmti)->GetObjectAllocTraces(jvmti, NULL, NULL), + "Get Object Allocated Traces")) { + return FALSE; + } + + if (check_capability_error((*jvmti)->GetObjectAllocTraces(jvmti, NULL, NULL), + "Get Cached Object Allocated Traces")) { return FALSE; } return TRUE; @@ -594,30 +607,29 @@ } static double calculate_average_stack_depth( - jvmtiError (*const get_traces)(jvmtiEnv*, jvmtiStackTraces*)) { - jvmtiStackTraces traces; + jvmtiError (*const get_traces)(jvmtiEnv*, jvmtiAllocTraceInfo**, jint*)) { + jvmtiAllocTraceInfo* traces = NULL; + jint trace_counter; - jvmtiError error = get_traces(jvmti, &traces);; + jvmtiError error = get_traces(jvmti, &traces, &trace_counter);; if (error != JVMTI_ERROR_NONE) { return 0; } - int trace_count = traces.trace_count; - - if (trace_count == 0) { + if (trace_counter == 0) { return 0; } int i; - jvmtiStackTrace* stack_traces = traces.stack_traces; double sum = 0; - for (i = 0; i < trace_count; i++) { - jvmtiStackTrace *stack_trace = stack_traces + i; - sum += stack_trace->frame_count; + for (i = 0; i < trace_counter; i++) { + jvmtiAllocTraceInfo* trace = traces + i; + jvmtiStackInfo* stack_info = trace->stack_info; + sum += stack_info->frame_count; } - if ((*jvmti)->ReleaseTraces(jvmti, &traces) != JVMTI_ERROR_NONE) { + if ((*jvmti)->Deallocate(jvmti, (unsigned char*) traces) != JVMTI_ERROR_NONE) { return 0; } @@ -627,7 +639,7 @@ JNIEXPORT jdouble JNICALL Java_MyPackage_HeapMonitorStackDepthTest_getAverageStackDepth(JNIEnv *env, jclass cls) { - double result = calculate_average_stack_depth((*jvmti)->GetLiveTraces); + double result = calculate_average_stack_depth((*jvmti)->GetObjectAllocTraces); if 
 
   if (result != 0) {
     return result;
@@ -639,26 +651,29 @@
 }
 
 typedef struct sThreadsFound {
-  jint *threads;
+  jint* threads;
   int num_threads;
 } ThreadsFound;
 
-static void find_threads_in_traces(jvmtiStackTraces* traces,
+static void find_threads_in_traces(jvmtiAllocTraceInfo* traces,
+                                   jint trace_counter,
                                    ThreadsFound* thread_data) {
   int i;
-  jvmtiStackTrace* stack_traces = traces->stack_traces;
-  int trace_count = traces->trace_count;
-
-  jint *threads = thread_data->threads;
+  jint* threads = thread_data->threads;
   int num_threads = thread_data->num_threads;
 
   // We are looking for at last expected_num_threads different traces.
-  for (i = 0; i < trace_count; i++) {
-    jvmtiStackTrace *stack_trace = stack_traces + i;
-    jlong thread_id = stack_trace->thread_id;
+  for (i = 0; i < trace_counter; i++) {
+    jvmtiAllocTraceInfo* trace = traces + i;
+    jvmtiStackInfo* stack_info = trace->stack_info;
+    jint thread_id = trace->thread_id;
 
     // Check it is the right frame: only accept helper top framed traces.
-    jmethodID methodid = stack_trace->frames[0].method;
+    if (stack_info->frame_count == 0) {
+      continue;
+    }
+
+    jmethodID methodid = stack_info->frame_buffer[0].method;
     char *name = NULL, *signature = NULL, *file_name = NULL;
     (*jvmti)->GetMethodName(jvmti, methodid, &name, &signature, 0);
@@ -687,30 +702,32 @@
 JNIEXPORT jboolean JNICALL
 Java_MyPackage_HeapMonitorThreadTest_checkSamples(JNIEnv* env, jclass cls,
                                                   jintArray threads) {
-  jvmtiStackTraces traces;
+  jvmtiAllocTraceInfo* traces;
+  jint trace_counter;
+
   ThreadsFound thread_data;
   thread_data.threads = (*env)->GetIntArrayElements(env, threads, 0);
   thread_data.num_threads = 0;
 
   // Get live and garbage traces to ensure we capture all the threads that have
   // been sampled.
-  if ((*jvmti)->GetLiveTraces(jvmti, &traces) != JVMTI_ERROR_NONE) {
+  if ((*jvmti)->GetObjectAllocTraces(jvmti, &traces, &trace_counter) != JVMTI_ERROR_NONE) {
     return FALSE;
   }
 
-  find_threads_in_traces(&traces, &thread_data);
+  find_threads_in_traces(traces, trace_counter, &thread_data);
 
-  if ((*jvmti)->ReleaseTraces(jvmti, &traces) != JVMTI_ERROR_NONE) {
+  if ((*jvmti)->Deallocate(jvmti, (unsigned char*) traces) != JVMTI_ERROR_NONE) {
     return FALSE;
   }
 
-  if ((*jvmti)->GetGarbageTraces(jvmti, &traces) != JVMTI_ERROR_NONE) {
+  if ((*jvmti)->GetGarbageTraces(jvmti, &traces, &trace_counter) != JVMTI_ERROR_NONE) {
     return FALSE;
   }
 
-  find_threads_in_traces(&traces, &thread_data);
+  find_threads_in_traces(traces, trace_counter, &thread_data);
 
-  if ((*jvmti)->ReleaseTraces(jvmti, &traces) != JVMTI_ERROR_NONE) {
+  if ((*jvmti)->Deallocate(jvmti, (unsigned char*) traces) != JVMTI_ERROR_NONE) {
     return FALSE;
   }
@@ -721,30 +738,37 @@
 JNIEXPORT void JNICALL
 Java_MyPackage_HeapMonitorCachedTest_getLiveTracesToForceGc(JNIEnv *env,
                                                             jclass cls) {
-  jvmtiStackTraces live_traces;
-  jvmtiError error = (*jvmti)->GetLiveTraces(jvmti, &live_traces);
+  jvmtiAllocTraceInfo* traces;
+  jint trace_counter;
+
+  jvmtiError error = (*jvmti)->GetObjectAllocTraces(jvmti, &traces,
+                                                    &trace_counter);
 
   if (error != JVMTI_ERROR_NONE) {
     return;
   }
 
-  (*jvmti)->ReleaseTraces(jvmti, &live_traces);
+  (*jvmti)->Deallocate(jvmti, (unsigned char*) traces);
 }
 
-static jboolean compare_traces(jvmtiStackTraces* traces,
-                               jvmtiStackTraces* other_traces,
+static jboolean compare_traces(jvmtiAllocTraceInfo* traces,
+                               int trace_count,
+                               jvmtiAllocTraceInfo* other_traces,
+                               int other_trace_count,
                                int print_out_comparisons) {
-  int trace_count = traces->trace_count;
-  if (trace_count != other_traces->trace_count) {
+  if (trace_count != other_trace_count) {
     return FALSE;
   }
 
   int i;
   for (i = 0; i < trace_count; i++) {
-    jvmtiStackTrace* trace = traces->stack_traces + i;
-    jvmtiStackTrace* other_trace = other_traces->stack_traces + i;
+    jvmtiAllocTraceInfo* trace = traces + i;
+    jvmtiAllocTraceInfo* other_trace = other_traces + i;
 
-    if (trace->frame_count != other_trace->frame_count) {
+    jvmtiStackInfo* stack_info = trace->stack_info;
+    jvmtiStackInfo* other_stack_info = other_trace->stack_info;
+
+    if (stack_info->frame_count != other_stack_info->frame_count) {
      return FALSE;
    }
@@ -756,9 +780,9 @@
       return FALSE;
     }
 
-    jvmtiFrameInfo* frames = trace->frames;
-    jvmtiFrameInfo* other_frames = other_trace->frames;
-    if (memcmp(frames, other_frames, sizeof(*frames) * trace->frame_count)) {
+    jvmtiFrameInfo* frames = stack_info->frame_buffer;
+    jvmtiFrameInfo* other_frames = other_stack_info->frame_buffer;
+    if (memcmp(frames, other_frames, sizeof(*frames) * stack_info->frame_count)) {
       return FALSE;
     }
   }
@@ -770,24 +794,34 @@
 Java_MyPackage_HeapMonitorCachedTest_cachedAndLiveAreSame(JNIEnv *env,
                                                           jclass cls) {
   // Get cached first, then get live (since live performs a GC).
-  jvmtiStackTraces cached_traces;
-  jvmtiError error = (*jvmti)->GetCachedTraces(jvmti, &cached_traces);
+  jvmtiAllocTraceInfo* cached_traces;
+  jint cached_trace_counter;
+  jvmtiError error = (*jvmti)->GetCachedObjectAllocTraces(jvmti, &cached_traces,
+                                                          &cached_trace_counter);
   if (error != JVMTI_ERROR_NONE) {
     return FALSE;
   }
 
-  jvmtiStackTraces live_traces;
-  error = (*jvmti)->GetLiveTraces(jvmti, &live_traces);
+  jvmtiAllocTraceInfo* live_traces;
+  jint live_trace_counter;
+  error = (*jvmti)->GetObjectAllocTraces(jvmti, &live_traces,
+                                         &live_trace_counter);
   if (error != JVMTI_ERROR_NONE) {
     return FALSE;
   }
 
-  int result = compare_traces(&cached_traces, &live_traces, PRINT_OUT);
+  int result = compare_traces(cached_traces, cached_trace_counter,
+                              live_traces, live_trace_counter,
+                              PRINT_OUT);
 
-  (*jvmti)->ReleaseTraces(jvmti, &cached_traces);
-  (*jvmti)->ReleaseTraces(jvmti, &live_traces);
+  if ((*jvmti)->Deallocate(jvmti, (unsigned char*) cached_traces) != JVMTI_ERROR_NONE) {
+    return FALSE;
+  }
+  if ((*jvmti)->Deallocate(jvmti, (unsigned char*) live_traces) != JVMTI_ERROR_NONE) {
+    return FALSE;
+  }
 
   return result;
 }
@@ -795,21 +829,22 @@
   return hash_code * 31 + value;
 }
 
-static long get_hash_code(jvmtiStackTraces* traces) {
-  int trace_count = traces->trace_count;
+static long get_hash_code(jvmtiAllocTraceInfo* traces, jint trace_counter) {
   int hash_code = 17;
+  int i, j;
+
+  hash_code = hash(hash_code, trace_counter);
+  for (i = 0; i < trace_counter; i++) {
+    jvmtiAllocTraceInfo* trace = traces + i;
 
-  int i;
-  hash_code = hash(hash_code, trace_count);
-  for (i = 0; i < trace_count; i++) {
-    jvmtiStackTrace* trace = traces->stack_traces + i;
-    hash_code = hash(hash_code, trace->frame_count);
     hash_code = hash(hash_code, trace->size);
     hash_code = hash(hash_code, trace->thread_id);
 
-    int j;
-    int frame_count = trace->frame_count;
-    jvmtiFrameInfo* frames = trace->frames;
+    jvmtiStackInfo* stack_info = trace->stack_info;
+    hash_code = hash(hash_code, stack_info->frame_count);
+
+    int frame_count = stack_info->frame_count;
+    jvmtiFrameInfo* frames = stack_info->frame_buffer;
     hash_code = hash(hash_code, frame_count);
     for (j = 0; j < frame_count; j++) {
       hash_code = hash(hash_code, (long) frames[i].method);
@@ -824,15 +859,20 @@
 Java_MyPackage_HeapMonitorCachedTest_getCachedHashCode(JNIEnv *env,
                                                        jclass cls) {
   // Get cached first, then get live.
-  jvmtiStackTraces cached_traces;
-  jvmtiError error = (*jvmti)->GetCachedTraces(jvmti, &cached_traces);
+  jvmtiAllocTraceInfo* cached_traces;
+  jint cached_trace_counter;
+  jvmtiError error = (*jvmti)->GetCachedObjectAllocTraces(jvmti, &cached_traces,
+                                                          &cached_trace_counter);
 
   if (error != JVMTI_ERROR_NONE) {
     return 0;
   }
 
-  long hash_code = get_hash_code(&cached_traces);
-  (*jvmti)->ReleaseTraces(jvmti, &cached_traces);
+  long hash_code = get_hash_code(cached_traces, cached_trace_counter);
+
+  if ((*jvmti)->Deallocate(jvmti, (unsigned char*) cached_traces) != JVMTI_ERROR_NONE) {
+    return FALSE;
+  }
 
   return hash_code;
 }