
src/hotspot/share/gc/shared/collectedHeap.cpp

rev 47223 : [mq]: heapz8
rev 47224 : [mq]: heap9a

*** 36,45 ****
--- 36,46 ----
  #include "logging/log.hpp"
  #include "memory/metaspace.hpp"
  #include "memory/resourceArea.hpp"
  #include "oops/instanceMirrorKlass.hpp"
  #include "oops/oop.inline.hpp"
+ #include "runtime/heapMonitoring.hpp"
  #include "runtime/init.hpp"
  #include "runtime/thread.inline.hpp"
  #include "services/heapDumper.hpp"
  #include "utilities/align.hpp"
*** 294,304 ****
--- 295,344 ----
      thread->check_for_valid_safepoint_state(true);
    }
  }
  #endif
  
+ HeapWord* CollectedHeap::handle_heap_sampling(Thread* thread, HeapWord* obj, size_t size) {
+   // We can come here for three reasons:
+   //  - We really did fill the TLAB.
+   //  - We pretended to everyone that we did, because we want to sample.
+   //  - Both of the above are true at the same time.
+   if (HeapMonitoring::enabled()) {
+     if (thread->tlab().should_sample()) {
+       HeapWord* end = thread->tlab().end();
+       thread->tlab().set_back_actual_end();
+ 
+       // If we don't have an object yet, try to allocate it.
+       if (obj == NULL) {
+         // The TLAB could still have space after this sample.
+         obj = thread->tlab().allocate(size);
+       }
+ 
+       // Is the object allocated now?
+       // If not, we have to wait for a new TLAB; the subsequent call to
+       // handle_heap_sampling will pick the next sample.
+       if (obj != NULL) {
+         // The object is allocated, sample it now.
+         HeapMonitoring::object_alloc_do_sample(thread,
+                                                reinterpret_cast<oopDesc*>(obj),
+                                                size * HeapWordSize);
+         // The allocation succeeded, so pick the next sample point here.
+         thread->tlab().pick_next_sample(thread->tlab().top() - end);
+       }
+     }
+   }
+ 
+   return obj;
+ }
+ 
  HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
+   HeapWord* obj = handle_heap_sampling(thread, NULL, size);
+   bool should_sample = thread->tlab().should_sample();
+ 
+   if (obj != NULL) {
+     return obj;
+   }
  
    // Retain tlab and allocate object in shared space if
    // the amount free in the tlab is too large to discard.
    if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
      thread->tlab().record_slow_allocation(size);
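
The hunk above depends on the TLAB "pretend end" scheme that its comment hints at ("we pretended to everyone we did" fill the TLAB): the TLAB reports a lowered end() so the bump-pointer fast path drops into allocate_from_tlab_slow() at the sample point, where set_back_actual_end() restores the real end and pick_next_sample() arms the next point. Below is a minimal standalone sketch of that idea under those assumptions; ToyTlab and its members are made-up stand-ins for illustration, not the ThreadLocalAllocBuffer API used by the patch.

#include <cstddef>

// Toy model of TLAB-based allocation sampling (illustrative only): the fast
// path checks against a lowered "fake" end so it traps into a slow path at
// the chosen sample point.
struct ToyTlab {
  char*  top;                 // bump pointer
  char*  actual_end;          // real end of the buffer
  char*  fake_end;            // lowered end that forces a slow-path call
  size_t bytes_until_sample;  // distance to the next sample point

  char* end() const { return fake_end; }

  // Fast path: bump-pointer allocation against the (possibly lowered) end.
  char* allocate(size_t bytes) {
    if (top + bytes > end()) {
      return nullptr;          // caller falls back to the slow path
    }
    char* obj = top;
    top += bytes;
    return obj;
  }

  bool should_sample() const { return fake_end != actual_end; }

  // Slow path restores the real end so allocation can continue normally.
  void set_back_actual_end() { fake_end = actual_end; }

  // After sampling, lower the end again to arm the next sample point.
  void pick_next_sample(size_t next_interval) {
    bytes_until_sample = next_interval;
    fake_end = (top + next_interval < actual_end) ? top + next_interval
                                                  : actual_end;
  }
};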
*** 314,324 ****
    if (new_tlab_size == 0) {
      return NULL;
    }
  
    // Allocate a new TLAB...
!   HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
    if (obj == NULL) {
      return NULL;
    }
  
    AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);
--- 354,364 ----
    if (new_tlab_size == 0) {
      return NULL;
    }
  
    // Allocate a new TLAB...
!   obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
    if (obj == NULL) {
      return NULL;
    }
  
    AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);
*** 335,345 ****
--- 375,390 ----
      size_t hdr_size = oopDesc::header_size();
      Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
  #endif // ASSERT
    }
    thread->tlab().fill(obj, obj + size, new_tlab_size);
+ 
+   if (should_sample) {
+     return handle_heap_sampling(thread, obj, size);
+   } else {
    return obj;
+   }
  }
  
  void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
    MemRegion deferred = thread->deferred_card_mark();
    if (!deferred.is_empty()) {
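
As a usage sketch of the ToyTlab model shown earlier (again illustrative only, reusing that type), the driver below hits one sample point, retries the allocation that crossed it, and arms the next point, loosely mirroring the should_sample() / set_back_actual_end() / pick_next_sample() sequence in the hunks above.

#include <cstdio>

int main() {
  static char storage[4096];
  // Start with the fake end equal to the real end, then arm a sample point
  // 256 bytes into the buffer.
  ToyTlab tlab{storage, storage + sizeof(storage), storage + sizeof(storage), 0};
  tlab.pick_next_sample(256);

  char* a = tlab.allocate(200);   // fast path succeeds
  char* b = tlab.allocate(100);   // crosses the fake end -> nullptr
  if (b == nullptr && tlab.should_sample()) {
    tlab.set_back_actual_end();   // slow path: restore the real end
    b = tlab.allocate(100);       // retry succeeds; b is the "sampled" object
    tlab.pick_next_sample(256);   // arm the next sample point
  }
  printf("a=%p b=%p\n", (void*)a, (void*)b);
  return 0;
}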