// ...

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

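// Reads the current high-water mark. The acquire load keeps later memory
// accesses in the caller from being reordered ahead of this read, so the
// caller observes a value consistently published by the atomic updates in
// inc_capacity_until_GC()/dec_capacity_until_GC() below.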
size_t MetaspaceGC::capacity_until_GC() {
  size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

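// Attempts to raise the high-water mark by v (bytes) with a single
// compare-and-swap. Returns false without retrying if another thread
// updated _capacity_until_GC concurrently; on success, the new and old
// values are optionally reported through the out parameters.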
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  intptr_t capacity_until_GC = _capacity_until_GC;
  intptr_t new_value = capacity_until_GC + v;

  if (new_value < capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::commit_alignment());
  }

  // CAS against the value used to compute new_value, so that a concurrent
  // update in between fails the exchange instead of being overwritten.
  intptr_t expected = capacity_until_GC;
  intptr_t actual = Atomic::cmpxchg(new_value, &_capacity_until_GC, expected);

  if (expected != actual) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = capacity_until_GC;
  }
  return true;
}

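// Lowers the high-water mark by v (bytes) and returns the resulting value.
// Unlike inc_capacity_until_GC() this cannot fail: the atomic subtraction
// is unconditional and needs no expected old value.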
size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::sub((intptr_t)v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }

// ...

}

void SpaceManager::inc_size_metrics(size_t words) {
  assert_lock_strong(SpaceManager::expand_lock());
  // Per-SpaceManager totals: words in allocated Metachunks and the
  // count of allocated Metachunks.
  _allocated_chunks_words = _allocated_chunks_words + words;
  _allocated_chunks_count++;
  // Global total of capacity in allocated Metachunks
  MetaspaceAux::inc_capacity(mdtype(), words);
  // Global total of allocated Metablocks.
  // used_words_slow() includes the overhead in each Metachunk, so
  // add that overhead to the used total when the Metachunk is first
  // added (i.e., only once per Metachunk).
  MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
}

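// Note: unlike inc_size_metrics() there is no expand_lock() assert here,
// so both the per-SpaceManager counter and the global total are updated
// atomically instead.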
void SpaceManager::inc_used_metrics(size_t words) {
  // Add to the per SpaceManager total
  Atomic::add(words, &_allocated_blocks_words);
  // Add to the global total
  MetaspaceAux::inc_used(mdtype(), words);
}

void SpaceManager::dec_total_from_size_metrics() {
  MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
  MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
  // Also deduct the overhead per Metachunk
  MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
}

void SpaceManager::initialize() {
  Metadebug::init_allocation_fail_alot_count();
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    _chunks_in_use[i] = NULL;
  }
  _current_chunk = NULL;
  log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
}

// ...

2736 " is greater than _capacity_words[%u] " SIZE_FORMAT,
2737 words, mdtype, capacity_words(mdtype));
2738 _capacity_words[mdtype] -= words;
2739 }
2740
2741 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
2742 assert_lock_strong(SpaceManager::expand_lock());
2743 // Needs to be atomic
2744 _capacity_words[mdtype] += words;
2745 }
2746
void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
  assert(words <= used_words(mdtype),
         "About to decrement below 0: words " SIZE_FORMAT
         " is greater than _used_words[%u] " SIZE_FORMAT,
         words, mdtype, used_words(mdtype));
  // For CMS, deallocation of the Metaspaces occurs during the
  // sweep, which is a concurrent phase. Protection by the expand_lock()
  // is not enough since allocation is done on a per-Metaspace basis
  // and protected by the Metaspace lock.
  Atomic::sub(words, &_used_words[mdtype]);
}

void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
  // _used_words tracks allocations for each piece of metadata.
  // Those allocations happen concurrently on different application
  // threads, so the update must be atomic.
  Atomic::add(words, &_used_words[mdtype]);
}

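// The "slow" variants below walk every Metaspace in the
// ClassLoaderDataGraph instead of reading the maintained _used_words
// counters, trading speed for an independently computed total.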
size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
  size_t used = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    // Sum allocated_blocks_words for each metaspace
    if (msp != NULL) {
      used += msp->used_words_slow(mdtype);
    }
  }
  return used * BytesPerWord;
}

size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
  size_t free = 0;
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    Metaspace* msp = iter.get_next();
    // ...