src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
rev 8156 : [mq]: 8066444
@@ -2590,26 +2590,44 @@
        i += CompactibleFreeListSpace::IndexSetStride) {
     _blocks_to_claim[i].modify(n, wt, true /* force */);
   }
 }
 
-HeapWord* CFLS_LAB::alloc(size_t word_sz) {
+HeapWord* CFLS_LAB::alloc(size_t word_sz, oop old, markOop m) {
   FreeChunk* res;
+  const ParNewTracer* gc_tracer = _cfls->_collector->young_gen()->gc_tracer();
   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
   if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
     // This locking manages sync with other large object allocations.
     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
                     Mutex::_no_safepoint_check_flag);
     res = _cfls->getChunkFromDictionaryExact(word_sz);
     if (res == NULL) return NULL;
+    if (gc_tracer->should_report_promotion_outside_plab_event()) {
+      size_t obj_bytes = word_sz * HeapWordSize;
+      uint age = m->has_displaced_mark_helper() ?
+                 m->displaced_mark_helper()->age() :
+                 m->age();
+      gc_tracer->report_promotion_outside_plab_event(old->klass(), obj_bytes,
+                                                     age, true);
+    }
   } else {
     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
     if (fl->count() == 0) {
       // Attempt to refill this local free list.
       get_from_global_pool(word_sz, fl);
       // If it didn't work, give up.
       if (fl->count() == 0) return NULL;
+      if (gc_tracer->should_report_promotion_in_new_plab_event()) {
+        size_t obj_bytes = word_sz * HeapWordSize;
+        uint age = m->has_displaced_mark_helper() ?
+                   m->displaced_mark_helper()->age() :
+                   m->age();
+        size_t lab_bytes = fl->list_size() * HeapWordSize;
+        gc_tracer->report_promotion_in_new_plab_event(old->klass(), obj_bytes,
+                                                      age, true, lab_bytes);
+      }
     }
     res = fl->get_chunk_at_head();
     assert(res != NULL, "Why was count non-zero?");
   }
   res->markNotFree();
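
For context, the standalone sketch below models the reporting pattern this hunk adds to CFLS_LAB::alloc: the object age is read from the displaced mark word when one is present, and a promotion event is reported either "outside PLAB" (large objects taken straight from the dictionary) or "in new PLAB" (small objects allocated after refilling the per-size local free list, whose refill size stands in for the PLAB size). All types and names in the sketch (MarkWord, PromotionTracer, object_age) are simplified stand-ins, not HotSpot's markOop/ParNewTracer API, and the Klass* argument is omitted.

// Hypothetical, simplified model of the promotion-event reporting pattern above.
#include <cstddef>
#include <cstdio>

// Stand-in for markOop: the object age may live in a displaced mark word.
struct MarkWord {
  unsigned age_bits;
  const MarkWord* displaced;  // non-null if the mark word is displaced
  bool has_displaced_mark_helper() const { return displaced != nullptr; }
  const MarkWord* displaced_mark_helper() const { return displaced; }
  unsigned age() const { return age_bits; }
};

// Stand-in for the tracer hooks used in CFLS_LAB::alloc.
struct PromotionTracer {
  bool should_report_promotion_outside_plab_event() const { return true; }
  void report_promotion_outside_plab_event(size_t obj_bytes, unsigned age,
                                           bool tenured) const {
    std::printf("outside PLAB: %zu bytes, age %u, tenured %d\n",
                obj_bytes, age, (int)tenured);
  }
  bool should_report_promotion_in_new_plab_event() const { return true; }
  void report_promotion_in_new_plab_event(size_t obj_bytes, unsigned age,
                                          bool tenured, size_t lab_bytes) const {
    std::printf("new PLAB: %zu bytes, age %u, tenured %d, lab %zu bytes\n",
                obj_bytes, age, (int)tenured, lab_bytes);
  }
};

// Mirrors the age extraction in the patch: prefer the displaced mark word.
static unsigned object_age(const MarkWord* m) {
  return m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
                                        : m->age();
}

int main() {
  const size_t HeapWordSize = sizeof(void*);
  PromotionTracer tracer;
  MarkWord inline_mark{3, nullptr};

  // Large-object path: allocated straight from the dictionary, outside any PLAB.
  size_t word_sz = 512;
  if (tracer.should_report_promotion_outside_plab_event()) {
    tracer.report_promotion_outside_plab_event(word_sz * HeapWordSize,
                                               object_age(&inline_mark),
                                               /* tenured */ true);
  }

  // Small-object path: the local free list was just refilled, so the refill
  // size is reported as the size of the new "PLAB".
  size_t small_sz = 8, refill_words = 256;
  if (tracer.should_report_promotion_in_new_plab_event()) {
    tracer.report_promotion_in_new_plab_event(small_sz * HeapWordSize,
                                              object_age(&inline_mark),
                                              /* tenured */ true,
                                              refill_words * HeapWordSize);
  }
  return 0;
}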