src/hotspot/share/runtime/perfMemory.cpp

*** 51,60 ****
--- 51,61 ----
  char* PerfMemory::_end = NULL;
  char* PerfMemory::_top = NULL;
  size_t PerfMemory::_capacity = 0;
  jint PerfMemory::_initialized = false;
  PerfDataPrologue* PerfMemory::_prologue = NULL;
+ bool PerfMemory::_destroyed = false;

  void perfMemory_init() {

    if (!UsePerfData) return;
*** 62,72 ****
  }

  void perfMemory_exit() {

    if (!UsePerfData) return;
!   if (!PerfMemory::is_initialized()) return;

    // Only destroy PerfData objects if we're at a safepoint and the
    // StatSampler is not active. Otherwise, we risk removing PerfData
    // objects that are currently being used by running JavaThreads
    // or the StatSampler. This method is invoked while we are not at
--- 63,73 ----
  }

  void perfMemory_exit() {

    if (!UsePerfData) return;
!   if (!PerfMemory::is_usable()) return;

    // Only destroy PerfData objects if we're at a safepoint and the
    // StatSampler is not active. Otherwise, we risk removing PerfData
    // objects that are currently being used by running JavaThreads
    // or the StatSampler. This method is invoked while we are not at
*** 86,96 ****
    PerfMemory::destroy();
  }

  void PerfMemory::initialize() {

!   if (_prologue != NULL)
      // initialization already performed
      return;

    size_t capacity = align_up(PerfDataMemorySize,
                               os::vm_allocation_granularity());
--- 87,97 ----
    PerfMemory::destroy();
  }

  void PerfMemory::initialize() {

!   if (is_initialized())
      // initialization already performed
      return;

    size_t capacity = align_up(PerfDataMemorySize,
                               os::vm_allocation_granularity());
*** 158,168 ****
    OrderAccess::release_store(&_initialized, 1);
  }

  void PerfMemory::destroy() {

!   if (_prologue == NULL) return;

    if (_start != NULL && _prologue->overflow != 0) {

      // This state indicates that the contiguous memory region exists and
      // that it wasn't large enough to hold all the counters. In this case,
--- 159,169 ----
    OrderAccess::release_store(&_initialized, 1);
  }

  void PerfMemory::destroy() {

!   if (!is_usable()) return;

    if (_start != NULL && _prologue->overflow != 0) {

      // This state indicates that the contiguous memory region exists and
      // that it wasn't large enough to hold all the counters. In this case,
*** 194,208 ****
      // expected to be the typical condition.
      //
      delete_memory_region();
    }

!   _start = NULL;
!   _end = NULL;
!   _top = NULL;
!   _prologue = NULL;
!   _capacity = 0;
  }

  // allocate an aligned block of memory from the PerfData memory
  // region. This method assumes that the PerfData memory region
  // was aligned on a double word boundary when created.
--- 195,205 ----
      // expected to be the typical condition.
      //
      delete_memory_region();
    }

!   _destroyed = true;
  }

  // allocate an aligned block of memory from the PerfData memory
  // region. This method assumes that the PerfData memory region
  // was aligned on a double word boundary when created.
*** 211,221 ****

    if (!UsePerfData) return NULL;

    MutexLocker ml(PerfDataMemAlloc_lock);

!   assert(_prologue != NULL, "called before initialization");

    // check that there is enough memory for this request
    if ((_top + size) >= _end) {

      _prologue->overflow += (jint)size;
--- 208,218 ----

    if (!UsePerfData) return NULL;

    MutexLocker ml(PerfDataMemAlloc_lock);

!   assert(is_usable(), "called before init or after destroy");

    // check that there is enough memory for this request
    if ((_top + size) >= _end) {

      _prologue->overflow += (jint)size;
*** 236,245 ****
--- 233,244 ----
  }

  void PerfMemory::mark_updated() {
    if (!UsePerfData) return;

+   assert(is_usable(), "called before init or after destroy");
+
    _prologue->mod_time_stamp = os::elapsed_counter();
  }

  // Returns the complete path including the file name of performance data file.
  // Caller is expected to release the allocated memory.
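Note: the hunks above define PerfMemory::_destroyed and call PerfMemory::is_usable(), but the matching declarations live in src/hotspot/share/runtime/perfMemory.hpp, which is not shown on this page. A minimal sketch of what that header-side change could look like, assuming is_usable() simply combines the existing is_initialized() test with the new flag; the accessor bodies and class layout below are illustrative guesses, not the actual patch:

// Hypothetical excerpt from perfMemory.hpp -- not part of this page.
// Only _destroyed and is_usable() would be new; the other members are the
// statics already referenced by the .cpp hunks above.
class PerfMemory : AllStatic {
 private:
  static char*             _start;
  static char*             _end;
  static char*             _top;
  static size_t            _capacity;
  static PerfDataPrologue* _prologue;
  static jint              _initialized;
  static bool              _destroyed;   // set once, by destroy()

 public:
  // Pairs with the release_store(&_initialized, 1) in initialize().
  static bool is_initialized() { return OrderAccess::load_acquire(&_initialized) != 0; }

  // Assumed definition: the region may be touched only after initialize()
  // has published it and before destroy() has run.
  static bool is_usable()      { return is_initialized() && !_destroyed; }
  // ...
};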