src/hotspot/share/memory/metaspace.cpp

*** 1497,1524 ****
    return delta;
  }
  
  size_t MetaspaceGC::capacity_until_GC() {
!   size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
    assert(value >= MetaspaceSize, "Not initialized properly?");
    return value;
  }
  
  bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
    assert_is_aligned(v, Metaspace::commit_alignment());
  
!   size_t capacity_until_GC = (size_t) _capacity_until_GC;
!   size_t new_value = capacity_until_GC + v;
  
    if (new_value < capacity_until_GC) {
      // The addition wrapped around, set new_value to aligned max value.
      new_value = align_down(max_uintx, Metaspace::commit_alignment());
    }
  
!   intptr_t expected = (intptr_t) capacity_until_GC;
!   intptr_t actual = Atomic::cmpxchg_ptr((intptr_t) new_value, &_capacity_until_GC, expected);
  
    if (expected != actual) {
      return false;
    }
--- 1497,1524 ----
    return delta;
  }
  
  size_t MetaspaceGC::capacity_until_GC() {
!   size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
    assert(value >= MetaspaceSize, "Not initialized properly?");
    return value;
  }
  
  bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
    assert_is_aligned(v, Metaspace::commit_alignment());
  
!   intptr_t capacity_until_GC = _capacity_until_GC;
!   intptr_t new_value = capacity_until_GC + v;
  
    if (new_value < capacity_until_GC) {
      // The addition wrapped around, set new_value to aligned max value.
      new_value = align_down(max_uintx, Metaspace::commit_alignment());
    }
  
!   intptr_t expected = _capacity_until_GC;
!   intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);
  
    if (expected != actual) {
      return false;
    }
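
The hunk above swaps the pointer-typed Atomic::cmpxchg_ptr for the templated Atomic::cmpxchg, which deduces the operand type from _capacity_until_GC and lets the manual intptr_t casts go away. As a rough stand-alone sketch of the same single-shot CAS pattern, with std::atomic standing in for HotSpot's Atomic class (all names below are illustrative, not from the patch):

    #include <atomic>
    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>
    
    // Illustrative stand-ins for the HotSpot globals touched above.
    static std::atomic<intptr_t> g_capacity_until_gc{1 * 1024 * 1024};
    static const intptr_t kAlignedMax = INTPTR_MAX & ~(intptr_t)4095;
    
    // Mirrors the shape of MetaspaceGC::inc_capacity_until_GC: read the
    // current value, compute the bumped value (clamping if the addition
    // wrapped around), then attempt exactly one compare-and-swap.
    bool inc_capacity_until_gc(intptr_t v, intptr_t* new_cap, intptr_t* old_cap) {
      intptr_t expected = g_capacity_until_gc.load(std::memory_order_relaxed);
      intptr_t new_value = expected + v;
      if (new_value < expected) {
        // The addition wrapped around; clamp to an aligned maximum.
        new_value = kAlignedMax;
      }
      // compare_exchange_strong plays the role of Atomic::cmpxchg here.
      if (!g_capacity_until_gc.compare_exchange_strong(expected, new_value)) {
        return false;  // lost the race to another thread
      }
      if (new_cap != nullptr) { *new_cap = new_value; }
      if (old_cap != nullptr) { *old_cap = expected; }
      return true;
    }
    
    int main() {
      intptr_t new_cap = 0, old_cap = 0;
      if (inc_capacity_until_gc(4096, &new_cap, &old_cap)) {
        std::printf("capacity %" PRIdPTR " -> %" PRIdPTR "\n", old_cap, new_cap);
      }
      return 0;
    }

As in the HotSpot code, the function deliberately attempts only one CAS and reports failure; the retry policy is left to the caller.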
*** 1532,1542 ****
  }
  
  size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
    assert_is_aligned(v, Metaspace::commit_alignment());
  
!   return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
  }
  
  void MetaspaceGC::initialize() {
    // Set the high-water mark to MaxMetapaceSize during VM initializaton since
    // we can't do a GC during initialization.
--- 1532,1542 ----
  }
  
  size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
    assert_is_aligned(v, Metaspace::commit_alignment());
  
!   return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
  }
  
  void MetaspaceGC::initialize() {
    // Set the high-water mark to MaxMetapaceSize during VM initializaton since
    // we can't do a GC during initialization.
--- 1532,1542 ----
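
One detail worth keeping in mind with the new dec_capacity_until_GC: HotSpot's Atomic::sub, like Atomic::add, returns the updated value, which is what this function passes back to its caller. A sketch of the equivalent using std::atomic, whose fetch_sub instead returns the old value (names here are illustrative):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    
    static std::atomic<intptr_t> g_capacity_until_gc{1 * 1024 * 1024};
    
    // Equivalent of the patched dec_capacity_until_GC. HotSpot's Atomic::sub
    // returns the *new* value, while std::atomic's fetch_sub returns the
    // *old* one, so the delta is re-applied to produce the same result.
    size_t dec_capacity_until_gc(size_t v) {
      intptr_t old_value = g_capacity_until_gc.fetch_sub((intptr_t)v);
      return (size_t)(old_value - (intptr_t)v);
    }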
*** 2396,2406 ****
    MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
  }
  
  void SpaceManager::inc_used_metrics(size_t words) {
    // Add to the per SpaceManager total
!   Atomic::add_ptr(words, &_allocated_blocks_words);
    // Add to the global total
    MetaspaceAux::inc_used(mdtype(), words);
  }
  
  void SpaceManager::dec_total_from_size_metrics() {
--- 2396,2406 ----
    MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
  }
  
  void SpaceManager::inc_used_metrics(size_t words) {
    // Add to the per SpaceManager total
!   Atomic::add(words, &_allocated_blocks_words);
    // Add to the global total
    MetaspaceAux::inc_used(mdtype(), words);
  }
  
  void SpaceManager::dec_total_from_size_metrics() {
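
This change is typical of the rest of the patch: the templated Atomic::add deduces its operand type from the destination, so size_t counters no longer have to be funneled through the _ptr spelling. A compilable sketch of the same bump-two-counters pattern with std::atomic (the counter names are illustrative, not from the patch):

    #include <atomic>
    #include <cstddef>
    
    // Illustrative stand-ins for the per-SpaceManager counter and the
    // global total kept by MetaspaceAux.
    static std::atomic<size_t> g_allocated_blocks_words{0};
    static std::atomic<size_t> g_global_used_words{0};
    
    // Same shape as SpaceManager::inc_used_metrics: bump the per-manager
    // total, then the global one; each update is atomic on its own, but the
    // two counters are not updated as a single atomic unit.
    void inc_used_metrics(size_t words) {
      g_allocated_blocks_words.fetch_add(words, std::memory_order_relaxed);
      g_global_used_words.fetch_add(words, std::memory_order_relaxed);
    }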
*** 2751,2770 ****
              words, mdtype, used_words(mdtype));
    // For CMS deallocation of the Metaspaces occurs during the
    // sweep which is a concurrent phase.  Protection by the expand_lock()
    // is not enough since allocation is on a per Metaspace basis
    // and protected by the Metaspace lock.
!   jlong minus_words = (jlong) - (jlong) words;
!   Atomic::add_ptr(minus_words, &_used_words[mdtype]);
  }
  
  void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
    // _used_words tracks allocations for
    // each piece of metadata.  Those allocations are
    // generally done concurrently by different application
    // threads so must be done atomically.
!   Atomic::add_ptr(words, &_used_words[mdtype]);
  }
  
  size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
    size_t used = 0;
    ClassLoaderDataGraphMetaspaceIterator iter;
--- 2751,2769 ----
              words, mdtype, used_words(mdtype));
    // For CMS deallocation of the Metaspaces occurs during the
    // sweep which is a concurrent phase.  Protection by the expand_lock()
    // is not enough since allocation is on a per Metaspace basis
    // and protected by the Metaspace lock.
!   Atomic::sub(words, &_used_words[mdtype]);
  }
  
  void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
    // _used_words tracks allocations for
    // each piece of metadata.  Those allocations are
    // generally done concurrently by different application
    // threads so must be done atomically.
!   Atomic::add(words, &_used_words[mdtype]);
  }
  
  size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
    size_t used = 0;
    ClassLoaderDataGraphMetaspaceIterator iter;
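
The last hunk drops the negate-through-jlong step entirely: decrementing the per-type counter under the concurrent CMS sweep is now expressed directly as Atomic::sub. A std::atomic sketch of the before and after shapes (MetadataType values and names below are illustrative):

    #include <atomic>
    #include <cstddef>
    
    enum MetadataType { NonClassType = 0, ClassType = 1, MetadataTypeCount = 2 };
    
    static std::atomic<size_t> g_used_words[MetadataTypeCount];
    
    // Before the patch: negate through a signed type, then atomically add.
    void dec_used_old_style(MetadataType mdtype, size_t words) {
      long long minus_words = -(long long)words;
      // Unsigned fetch_add of the wrapped value behaves as a subtract.
      g_used_words[mdtype].fetch_add((size_t)minus_words);
    }
    
    // After the patch: subtract directly, as the templated Atomic::sub allows.
    void dec_used(MetadataType mdtype, size_t words) {
      g_used_words[mdtype].fetch_sub(words);
    }

Both versions produce the same counter value; the new form simply says what it means.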