
src/share/vm/gc/shared/collectedHeap.cpp

rev 13139 : [mq]: heap7
rev 13140 : [mq]: heapz8

*** 36,45 ****
--- 36,46 ----
  #include "logging/log.hpp"
  #include "memory/metaspace.hpp"
  #include "memory/resourceArea.hpp"
  #include "oops/instanceMirrorKlass.hpp"
  #include "oops/oop.inline.hpp"
+ #include "runtime/heapMonitoring.hpp"
  #include "runtime/init.hpp"
  #include "runtime/thread.inline.hpp"
  #include "services/heapDumper.hpp"
*** 293,303 ****
--- 294,340 ----
        thread->check_for_valid_safepoint_state(true);
      }
    }
  #endif
  
+ HeapWord* CollectedHeap::handle_heap_sampling(Thread* thread, HeapWord* obj, size_t size) {
+   // We can come here for three reasons:
+   //  - We really did fill the TLAB.
+   //  - We pretended to everyone that we did, because we want to sample.
+   //  - Both of the above are true at the same time.
+   if (HeapMonitoring::enabled()) {
+     if (thread->tlab().should_sample()) {
+       // If we don't have an object yet, try to allocate it.
+       if (obj == NULL) {
+         // The TLAB could still have space after this sample.
+         thread->tlab().set_back_actual_end();
+         obj = thread->tlab().allocate(size);
+       }
+ 
+       // Is the object allocated now?
+       // If not, we have to wait for a new TLAB; the subsequent call to
+       // handle_heap_sampling will pick the next sample.
+       if (obj != NULL) {
+         // The object is allocated, sample it now.
+         HeapMonitoring::object_alloc_do_sample(thread,
+                                                reinterpret_cast<oopDesc*>(obj),
+                                                size);
+         // The allocation succeeded, so pick the next sample.
+         thread->tlab().pick_next_sample();
+       }
+     }
+   }
+ 
+   return obj;
+ }
+ 
  HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
+   HeapWord* obj = handle_heap_sampling(thread, NULL, size);
+ 
+   if (obj != NULL) {
+     return obj;
+   }
  
    // Retain tlab and allocate object in shared space if
    // the amount free in the tlab is too large to discard.
    if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
      thread->tlab().record_slow_allocation(size);
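Reviewer note: the hunk above relies on the TLAB's visible end being set short of its actual end, so the fast-path allocator runs out of space exactly at the sample point and falls into this slow path. The sketch below is a minimal, self-contained illustration of that fake-end pattern, not HotSpot code: the FakeEndTlab type, its fields, and the 128-byte interval are hypothetical, and only the operation names (should_sample, set_back_actual_end, pick_next_sample, allocate) are taken from the patch.

  #include <cstddef>
  #include <cstdio>
  #include <vector>

  // Hypothetical stand-in for the TLAB's sampling state.
  struct FakeEndTlab {
    char* top;         // next free byte
    char* end;         // end seen by the fast path (may be a "fake" end)
    char* actual_end;  // real end of the buffer

    // A sample is due if the visible end was pulled in ahead of the real end.
    bool should_sample() const { return end != actual_end; }

    // Reopen the space remaining past the sample point.
    void set_back_actual_end() { end = actual_end; }

    // Arm the next fake end, interval bytes past the current top.
    void pick_next_sample(size_t interval) {
      if (top + interval < actual_end) {
        end = top + interval;
      }
    }

    // Fast-path bump-pointer allocation; fails at the (possibly fake) end.
    char* allocate(size_t size) {
      if (top + size > end) return nullptr;
      char* obj = top;
      top += size;
      return obj;
    }
  };

  int main() {
    std::vector<char> buf(1024);
    FakeEndTlab tlab{buf.data(), buf.data() + 128, buf.data() + 1024};

    while (tlab.allocate(64) != nullptr) {
      // Fast path succeeds until it hits the fake end at offset 128.
    }

    if (tlab.should_sample()) {        // slow path: a sample is due
      tlab.set_back_actual_end();      // the TLAB still has room past the sample
      char* obj = tlab.allocate(64);   // retry the allocation that "failed"
      std::printf("sampled object at offset %td\n", obj - buf.data());
      tlab.pick_next_sample(128);      // arm the next sample point
    }
    return 0;
  }

This only sketches the control flow handle_heap_sampling participates in; the real bookkeeping (sample intervals, object_alloc_do_sample, interaction with GC) lives behind HeapMonitoring and the thread's TLAB.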
*** 313,323 ****
    if (new_tlab_size == 0) {
      return NULL;
    }
  
    // Allocate a new TLAB...
!   HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
    if (obj == NULL) {
      return NULL;
    }
  
    AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);
--- 350,360 ----
    if (new_tlab_size == 0) {
      return NULL;
    }
  
    // Allocate a new TLAB...
!   obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
    if (obj == NULL) {
      return NULL;
    }
  
    AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);
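Reviewer note: the only change in this hunk is mechanical. obj becomes an assignment instead of a declaration, because the declaration now sits at the top of allocate_from_tlab_slow, where handle_heap_sampling may already have satisfied the request.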
*** 334,343 ****
--- 371,381 ----
      size_t hdr_size = oopDesc::header_size();
      Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
  #endif // ASSERT
    }
    thread->tlab().fill(obj, obj + size, new_tlab_size);
+   handle_heap_sampling(thread, obj, size);
    return obj;
  }
  
  void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
    MemRegion deferred = thread->deferred_card_mark();
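Reviewer note on the ordering in this second call site: handle_heap_sampling runs only after thread->tlab().fill(...) installs the new buffer, and it receives the already-allocated obj, so its obj == NULL branch is skipped. Its job here is to record the sample for obj if one is due and to arm the next sample point inside the fresh TLAB.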