--- old/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	2019-01-28 17:43:43.000000000 +0800
+++ new/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	2019-01-28 17:43:43.000000000 +0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,34 @@
   claim_or_forward_internal_depth(p);
 }
 
+inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
+                                                      size_t obj_size,
+                                                      uint age, bool tenured,
+                                                      const PSPromotionLAB* lab) {
+  assert(EnableJFR, "sanity check");
+  // Skip if memory allocation failed
+  if (new_obj != NULL) {
+    const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer();
+
+    if (lab != NULL) {
+      // Promotion of object through newly allocated PLAB
+      if (gc_tracer->should_report_promotion_in_new_plab_event()) {
+        size_t obj_bytes = obj_size * HeapWordSize;
+        size_t lab_size = lab->capacity();
+        gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes,
+                                                      age, tenured, lab_size);
+      }
+    } else {
+      // Promotion of object directly to heap
+      if (gc_tracer->should_report_promotion_outside_plab_event()) {
+        size_t obj_bytes = obj_size * HeapWordSize;
+        gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes,
+                                                       age, tenured);
+      }
+    }
+  }
+}
+
 //
 // This method is pretty bulky. It would be nice to split it up
 // into smaller submethods, but we need to be careful not to hurt
@@ -98,6 +126,9 @@
           if (new_obj_size > (YoungPLABSize / 2)) {
             // Allocate this object directly
             new_obj = (oop)young_space()->cas_allocate(new_obj_size);
+            if (EnableJFR) {
+              promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL);
+            }
           } else {
             // Flush and fill
             _young_lab.flush();
@@ -107,6 +138,9 @@
             _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
             // Try the young lab allocation again.
             new_obj = (oop) _young_lab.allocate(new_obj_size);
+            if (EnableJFR) {
+              promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab);
+            }
           } else {
             _young_gen_is_full = true;
           }
@@ -127,11 +161,20 @@
       new_obj_is_tenured = true;
 
       if (new_obj == NULL) {
+        uint age = 0;
+        if (EnableJFR) {
+          // Find the objects age, MT safe.
+          age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
+            test_mark->displaced_mark_helper()->age() : test_mark->age();
+        }
         if (!_old_gen_is_full) {
           // Do we allocate directly, or flush and refill?
           if (new_obj_size > (OldPLABSize / 2)) {
             // Allocate this object directly
             new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
+            if (EnableJFR) {
+              promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL);
+            }
           } else {
             // Flush and fill
             _old_lab.flush();
@@ -148,6 +191,9 @@
             _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
             // Try the old lab allocation again.
             new_obj = (oop) _old_lab.allocate(new_obj_size);
+            if (EnableJFR) {
+              promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab);
+            }
           }
         }
       }