--- old/src/hotspot/share/gc/g1/collectionSetChooser.cpp	2018-10-05 11:08:30.769681500 -0700
+++ new/src/hotspot/share/gc/g1/collectionSetChooser.cpp	2018-10-05 11:08:29.847273100 -0700
@@ -49,6 +49,14 @@
     return -1;
   }
 
+  if (hr1->is_premature_old() && !hr2->is_premature_old()) {
+    return -1;
+  }
+
+  if (hr2->is_premature_old() && !hr1->is_premature_old()) {
+    return 1;
+  }
+
   double gc_eff1 = hr1->gc_efficiency();
   double gc_eff2 = hr2->gc_efficiency();
   if (gc_eff1 > gc_eff2) {
@@ -283,7 +291,7 @@
 
 bool CollectionSetChooser::should_add(HeapRegion* hr) const {
   return !hr->is_young() &&
          !hr->is_pinned() &&
-         region_occupancy_low_enough_for_evac(hr->live_bytes()) &&
+         (hr->is_premature_old() || region_occupancy_low_enough_for_evac(hr->live_bytes())) &&
          hr->rem_set()->is_complete();
 }
--- old/src/hotspot/share/gc/g1/g1Allocator.inline.hpp	2018-10-05 11:08:39.335773100 -0700
+++ new/src/hotspot/share/gc/g1/g1Allocator.inline.hpp	2018-10-05 11:08:38.457572700 -0700
@@ -97,7 +97,7 @@
   }
 
   _archive_check_enabled = true;
-  size_t length = Universe::heap()->max_capacity();
+  size_t length = static_cast<G1CollectedHeap*>(Universe::heap())->max_reserved_capacity();
   _closed_archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
                                         (HeapWord*)Universe::heap()->base() + length,
                                         HeapRegion::GrainBytes);
--- old/src/hotspot/share/gc/g1/g1CardCounts.cpp	2018-10-05 11:08:47.531232000 -0700
+++ new/src/hotspot/share/gc/g1/g1CardCounts.cpp	2018-10-05 11:08:46.653874800 -0700
@@ -63,7 +63,7 @@
 }
 
 void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
-  assert(_g1h->max_capacity() > 0, "initialization order");
+  assert(_g1h->max_reserved_capacity() > 0, "initialization order");
   assert(_g1h->capacity() == 0, "initialization order");
 
   if (G1ConcRSHotCardLimit > 0) {
--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	2018-10-05 11:08:55.725786500 -0700
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	2018-10-05 11:08:54.844171000 -0700
@@ -166,7 +166,7 @@
          "the only time we use this to allocate a humongous region is "
          "when we are allocating a single humongous region");
 
-  HeapRegion* res = _hrm.allocate_free_region(is_old);
+  HeapRegion* res = _hrm->allocate_free_region(is_old);
 
   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
     // Currently, only attempts to allocate GC alloc regions set
@@ -183,7 +183,7 @@
       // always expand the heap by an amount aligned to the heap
       // region size, the free list should in theory not be empty.
       // In either case allocate_free_region() will check for NULL.
-      res = _hrm.allocate_free_region(is_old);
+      res = _hrm->allocate_free_region(is_old);
     } else {
       _expand_heap_after_alloc_failure = false;
     }
@@ -337,9 +337,9 @@
   } else {
     // Policy: Try only empty regions (i.e. already committed first). Maybe we
     // are lucky enough to find some.
-    first = _hrm.find_contiguous_only_empty(obj_regions);
+    first = _hrm->find_contiguous_only_empty(obj_regions);
     if (first != G1_NO_HRM_INDEX) {
-      _hrm.allocate_free_regions_starting_at(first, obj_regions);
+      _hrm->allocate_free_regions_starting_at(first, obj_regions);
     }
   }
 
@@ -347,14 +347,14 @@
     // Policy: We could not find enough regions for the humongous object in the
     // free list. Look through the heap to find a mix of free and uncommitted regions.
     // If so, try expansion.
-    first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
+    first = _hrm->find_contiguous_empty_or_unavailable(obj_regions);
     if (first != G1_NO_HRM_INDEX) {
       // We found something. Make sure these regions are committed, i.e. expand
       // the heap. Alternatively we could do a defragmentation GC.
       log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
                                 word_size * HeapWordSize);
 
-      _hrm.expand_at(first, obj_regions, workers());
+      _hrm->expand_at(first, obj_regions, workers());
       g1_policy()->record_new_heap_size(num_regions());
 
 #ifdef ASSERT
@@ -365,7 +365,7 @@
         assert(is_on_master_free_list(hr), "sanity");
       }
 #endif
-      _hrm.allocate_free_regions_starting_at(first, obj_regions);
+      _hrm->allocate_free_regions_starting_at(first, obj_regions);
     } else {
       // Policy: Potentially trigger a defragmentation GC.
     }
@@ -554,7 +554,7 @@
 bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
-  MemRegion reserved = _hrm.reserved();
+  MemRegion reserved = _hrm->reserved();
   for (size_t i = 0; i < count; i++) {
     if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
       return false;
@@ -571,7 +571,7 @@
   assert(count != 0, "No MemRegions provided");
   MutexLockerEx x(Heap_lock);
 
-  MemRegion reserved = _hrm.reserved();
+  MemRegion reserved = _hrm->reserved();
   HeapWord* prev_last_addr = NULL;
   HeapRegion* prev_last_region = NULL;
 
@@ -605,7 +605,7 @@
     // range ended, and adjust the start address so we don't try to allocate
     // the same region again. If the current range is entirely within that
     // region, skip it, just adjusting the recorded top.
-    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    HeapRegion* start_region = _hrm->addr_to_region(start_address);
     if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
       start_address = start_region->end();
       if (start_address > last_address) {
@@ -615,12 +615,12 @@
       }
       start_region->set_top(start_address);
       curr_range = MemRegion(start_address, last_address + 1);
-      start_region = _hrm.addr_to_region(start_address);
+      start_region = _hrm->addr_to_region(start_address);
     }
 
     // Perform the actual region allocation, exiting if it fails.
     // Then note how much new space we have allocated.
-    if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
+    if (!_hrm->allocate_containing_regions(curr_range, &commits, workers())) {
       return false;
     }
     increase_used(word_size * HeapWordSize);
@@ -632,8 +632,8 @@
 
     // Mark each G1 region touched by the range as archive, add it to
     // the old set, and set top.
-    HeapRegion* curr_region = _hrm.addr_to_region(start_address);
-    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    HeapRegion* curr_region = _hrm->addr_to_region(start_address);
+    HeapRegion* last_region = _hrm->addr_to_region(last_address);
     prev_last_region = last_region;
 
     while (curr_region != NULL) {
@@ -650,7 +650,7 @@
       HeapRegion* next_region;
       if (curr_region != last_region) {
         top = curr_region->end();
-        next_region = _hrm.next_region_in_heap(curr_region);
+        next_region = _hrm->next_region_in_heap(curr_region);
       } else {
         top = last_address + 1;
         next_region = NULL;
@@ -671,7 +671,7 @@
   assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
-  MemRegion reserved = _hrm.reserved();
+  MemRegion reserved = _hrm->reserved();
   HeapWord *prev_last_addr = NULL;
   HeapRegion* prev_last_region = NULL;
 
@@ -691,8 +691,8 @@
            "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
            p2i(start_address), p2i(prev_last_addr));
 
-    HeapRegion* start_region = _hrm.addr_to_region(start_address);
-    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    HeapRegion* start_region = _hrm->addr_to_region(start_address);
+    HeapRegion* last_region = _hrm->addr_to_region(last_address);
     HeapWord* bottom_address = start_region->bottom();
 
     // Check for a range beginning in the same region in which the
@@ -708,7 +708,7 @@
       guarantee(curr_region->is_archive(),
                 "Expected archive region at index %u", curr_region->hrm_index());
       if (curr_region != last_region) {
-        curr_region = _hrm.next_region_in_heap(curr_region);
+        curr_region = _hrm->next_region_in_heap(curr_region);
       } else {
         curr_region = NULL;
       }
@@ -757,7 +757,7 @@
   assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
-  MemRegion reserved = _hrm.reserved();
+  MemRegion reserved = _hrm->reserved();
   HeapWord* prev_last_addr = NULL;
   HeapRegion* prev_last_region = NULL;
   size_t size_used = 0;
@@ -779,8 +779,8 @@
     size_used += ranges[i].byte_size();
     prev_last_addr = last_address;
 
-    HeapRegion* start_region = _hrm.addr_to_region(start_address);
-    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+    HeapRegion* start_region = _hrm->addr_to_region(start_address);
+    HeapRegion* last_region = _hrm->addr_to_region(last_address);
 
     // Check for ranges that start in the same G1 region in which the previous
     // range ended, and adjust the start address so we don't try to free
@@ -791,7 +791,7 @@
       if (start_address > last_address) {
         continue;
       }
-      start_region = _hrm.addr_to_region(start_address);
+      start_region = _hrm->addr_to_region(start_address);
     }
     prev_last_region = last_region;
 
@@ -806,11 +806,11 @@
       curr_region->set_free();
       curr_region->set_top(curr_region->bottom());
       if (curr_region != last_region) {
-        curr_region = _hrm.next_region_in_heap(curr_region);
+        curr_region = _hrm->next_region_in_heap(curr_region);
       } else {
         curr_region = NULL;
       }
-      _hrm.shrink_at(curr_index, 1);
+      _hrm->shrink_at(curr_index, 1);
       uncommitted_regions++;
     }
 
@@ -1072,7 +1072,7 @@
 }
 
 void G1CollectedHeap::verify_after_full_collection() {
-  _hrm.verify_optional();
+  _hrm->verify_optional();
   _verifier->verify_region_sets_optional();
   _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
   // Clear the previous marking bitmap, if needed for bitmap verification.
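The collectionSetChooser.cpp hunk at the top of this patch reorders collection-set candidates so that premature-old regions always sort ahead of ordinary candidates, with GC efficiency only breaking ties within each group. A minimal standalone sketch of that ordering (a toy Region struct stands in for HeapRegion; all names here are illustrative, not taken from the patch):

#include <algorithm>
#include <cstdio>
#include <vector>

// Simplified stand-in for HeapRegion; only the fields the comparator reads.
struct Region {
  const char* name;
  bool premature_old;    // young region tagged old in place, still resident in DRAM
  double gc_efficiency;  // reclaimable bytes per unit of predicted GC cost
};

// Mirrors the patched comparator: premature-old regions first,
// then higher GC efficiency first.
static bool comes_before(const Region& r1, const Region& r2) {
  if (r1.premature_old != r2.premature_old) {
    return r1.premature_old;
  }
  return r1.gc_efficiency > r2.gc_efficiency;
}

int main() {
  std::vector<Region> regions = {
    {"old-high-eff",      false, 9.0},
    {"premature-low-eff", true,  1.0},
    {"old-low-eff",       false, 2.0}
  };
  std::sort(regions.begin(), regions.end(), comes_before);
  // Prints premature-low-eff first even though its efficiency is lowest:
  // a mixed GC should evacuate such regions to NV-DIMM as soon as possible.
  for (const Region& r : regions) printf("%s\n", r.name);
  return 0;
}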
@@ -1324,7 +1324,7 @@
 
   if (expand(expand_bytes, _workers)) {
-    _hrm.verify_optional();
+    _hrm->verify_optional();
     _verifier->verify_region_sets_optional();
     return attempt_allocation_at_safepoint(word_size,
                                            false /* expect_null_mutator_alloc_region */);
@@ -1349,7 +1349,7 @@
     uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
     assert(regions_to_expand > 0, "Must expand by at least one region");
 
-    uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
+    uint expanded_by = _hrm->expand_by(regions_to_expand, pretouch_workers);
     if (expand_time_ms != NULL) {
       *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
     }
@@ -1364,7 +1364,7 @@
     // The expansion of the virtual storage space was unsuccessful.
     // Let's see if it was because we ran out of swap.
     if (G1ExitOnExpansionFailure &&
-        _hrm.available() >= regions_to_expand) {
+        _hrm->available() >= regions_to_expand) {
       // We had head room...
       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
     }
@@ -1379,7 +1379,7 @@
                                          HeapRegion::GrainBytes);
   uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
 
-  uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
+  uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
 
   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
 
@@ -1407,7 +1407,7 @@
   shrink_helper(shrink_bytes);
   rebuild_region_sets(true /* free_list_only */);
 
-  _hrm.verify_optional();
+  _hrm->verify_optional();
   _verifier->verify_region_sets_optional();
 }
 
@@ -1485,7 +1485,8 @@
   _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
   _bot(NULL),
   _listener(),
-  _hrm(),
+  _hrm(NULL),
+  _is_hetero_heap(false),
   _allocator(NULL),
   _verifier(NULL),
   _summary_bytes_used(0),
@@ -1638,6 +1639,10 @@
   // address that was requested (i.e. the preferred heap base).
   // If this happens then we could end up using a non-optimal
   // compressed oops mode.
+  if (AllocateOldGenAt != NULL) {
+    _is_hetero_heap = true;
+    max_byte_size *= 2;
+  }
 
   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                  heap_alignment);
@@ -1660,7 +1665,7 @@
   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
   G1RegionToSpaceMapper* heap_storage =
-    G1RegionToSpaceMapper::create_mapper(g1_rs,
+    G1RegionToSpaceMapper::create_heap_mapper(g1_rs,
                                          g1_rs.size(),
                                          page_size,
                                          HeapRegion::GrainBytes,
@@ -1696,7 +1701,13 @@
   G1RegionToSpaceMapper* next_bitmap_storage =
     create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
 
-  _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
+  if (is_hetero_heap()) {
+    _hrm = new HeapRegionManagerForHeteroHeap((uint)((max_byte_size / 2) / HeapRegion::GrainBytes /* heap size as num of regions */));
+  } else {
+    _hrm = new HeapRegionManager();
+  }
+  _hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
   _card_table->initialize(cardtable_storage);
   // Do later initialization work for concurrent refinement.
   _hot_card_cache->initialize(card_counts_storage);
@@ -1711,20 +1722,20 @@
   guarantee(g1_rs.base() >= (char*)G1CardTable::card_size,
             "Java heap must not start within the first card.");
   // Also create a G1 rem set.
   _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
-  _g1_rem_set->initialize(max_capacity(), max_regions());
+  _g1_rem_set->initialize(max_reserved_capacity(), max_regions());
 
   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
             "too many cards per region");
 
-  FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
+  FreeRegionList::set_unrealistically_long_length(max_expandable_regions() + 1);
 
   _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
 
   {
-    HeapWord* start = _hrm.reserved().start();
-    HeapWord* end = _hrm.reserved().end();
+    HeapWord* start = _hrm->reserved().start();
+    HeapWord* end = _hrm->reserved().end();
     size_t granularity = HeapRegion::GrainBytes;
 
     _in_cset_fast_test.initialize(start, end, granularity);
@@ -1783,7 +1794,7 @@
 
   // Here we allocate the dummy HeapRegion that is required by the
   // G1AllocRegion class.
-  HeapRegion* dummy_region = _hrm.get_dummy_region();
+  HeapRegion* dummy_region = _hrm->get_dummy_region();
 
   // We'll re-use the same region whether the alloc region will
   // require BOT updates or not and, if it doesn't, then a non-young
@@ -1908,11 +1919,11 @@
 }
 
 size_t G1CollectedHeap::capacity() const {
-  return _hrm.length() * HeapRegion::GrainBytes;
+  return _hrm->length() * HeapRegion::GrainBytes;
 }
 
 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
-  return _hrm.total_free_bytes();
+  return _hrm->total_free_bytes();
 }
 
 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
@@ -2127,7 +2138,7 @@
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
-  if (_hrm.reserved().contains(p)) {
+  if (_hrm->reserved().contains(p)) {
     // Given that we know that p is in the reserved space,
     // heap_region_containing() should successfully
     // return the containing region.
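With AllocateOldGenAt set, the initialization above doubles max_byte_size, so the reservation covers 2 * MaxHeapSize: the lower half is file-mapped on the NV-DIMM path and the upper half stays in DRAM (see the G1RegionToHeteroSpaceMapper constructor later in this patch), and the region index space splits accordingly. A small self-contained sketch of that index arithmetic, with plain parameters standing in for the real VM globals:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Region-index layout implied by the patch: the reservation covers
// 2 * MaxHeapSize; the first half (lower addresses, lower indexes) is
// file-mapped on NV-DIMM and the second half is ordinary DRAM.
struct HeteroLayout {
  uint32_t nvdimm_start, nvdimm_end;  // inclusive index range on NV-DIMM
  uint32_t dram_start, dram_end;      // inclusive index range in DRAM
};

static HeteroLayout compute_layout(size_t max_heap_bytes, size_t region_bytes) {
  uint32_t regions_per_half = (uint32_t)(max_heap_bytes / region_bytes);
  HeteroLayout l;
  l.nvdimm_start = 0;
  l.nvdimm_end   = regions_per_half - 1;
  l.dram_start   = regions_per_half;
  l.dram_end     = 2 * regions_per_half - 1;
  return l;
}

int main() {
  // e.g. -Xmx1g with 1M regions: indexes 0..1023 on NV-DIMM, 1024..2047 in DRAM.
  HeteroLayout l = compute_layout(1024u * 1024 * 1024, 1024u * 1024);
  printf("nvdimm: %u..%u, dram: %u..%u\n",
         l.nvdimm_start, l.nvdimm_end, l.dram_start, l.dram_end);
  return 0;
}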
@@ -2141,7 +2152,7 @@
 
 #ifdef ASSERT
 bool G1CollectedHeap::is_in_exact(const void* p) const {
   bool contains = reserved_region().contains(p);
-  bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
+  bool available = _hrm->is_available(addr_to_region((HeapWord*)p));
   if (contains && available) {
     return true;
   } else {
@@ -2172,18 +2183,18 @@
 }
 
 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
-  _hrm.iterate(cl);
+  _hrm->iterate(cl);
 }
 
 void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
                                                                  HeapRegionClaimer *hrclaimer,
                                                                  uint worker_id) const {
-  _hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
+  _hrm->par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
 }
 
 void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
                                                          HeapRegionClaimer *hrclaimer) const {
-  _hrm.par_iterate(cl, hrclaimer, 0);
+  _hrm->par_iterate(cl, hrclaimer, 0);
 }
 
 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
@@ -2232,7 +2243,11 @@
 }
 
 size_t G1CollectedHeap::max_capacity() const {
-  return _hrm.reserved().byte_size();
+  return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
+}
+
+size_t G1CollectedHeap::max_reserved_capacity() const {
+  return _hrm->max_length() * HeapRegion::GrainBytes;
 }
 
 jlong G1CollectedHeap::millis_since_last_gc() {
@@ -2322,8 +2337,8 @@
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used_unlocked()/K);
   st->print(" [" PTR_FORMAT ", " PTR_FORMAT ")",
-            p2i(_hrm.reserved().start()),
-            p2i(_hrm.reserved().end()));
+            p2i(_hrm->reserved().start()),
+            p2i(_hrm->reserved().end()));
   st->cr();
   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
   uint young_regions = young_regions_count();
@@ -3109,7 +3124,7 @@
     // output from the concurrent mark thread interfering with this
     // logging output either.
 
-    _hrm.verify_optional();
+    _hrm->verify_optional();
     _verifier->verify_region_sets_optional();
 
     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
@@ -3786,7 +3801,7 @@
                                   bool locked) {
   assert(!hr->is_free(), "the region should not be free");
   assert(!hr->is_empty(), "the region should not be empty");
-  assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
+  assert(_hrm->is_available(hr->hrm_index()), "region should be committed");
   assert(free_list != NULL, "pre-condition");
 
   if (G1VerifyBitmaps) {
@@ -3827,7 +3842,7 @@
   assert(list != NULL, "list can't be null");
   if (!list->is_empty()) {
     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    _hrm.insert_list_into_free_list(list);
+    _hrm->insert_list_into_free_list(list);
   }
 }
 
@@ -3907,7 +3922,15 @@
     _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
   }
   // The region is now considered to be old.
-  r->set_old();
+  if (g1h->is_hetero_heap()) {
+    if (!r->is_old()) {
+      // The region was young before; set it as premature old so that the next
+      // mixed GC can move its contents to an old region, which is on nv-dimm.
+      r->set_premature_old();
+    }
+  } else {
+    r->set_old();
+  }
   // Do some allocation statistics accounting. Regions that failed evacuation
   // are always made old, so there is no need to update anything in the young
   // gen statistics, but we need to update old gen statistics.
@@ -4360,7 +4383,7 @@
   // this is that during a full GC string deduplication needs to know if
   // a collected region was young or old when the full GC was initiated.
   }
-  _hrm.remove_all_free_regions();
+  _hrm->remove_all_free_regions();
 }
 
 void G1CollectedHeap::increase_used(size_t bytes) {
@@ -4435,7 +4458,7 @@
     _survivor.clear();
   }
 
-  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
+  RebuildRegionSetsClosure cl(free_list_only, &_old_set, _hrm);
   heap_region_iterate(&cl);
 
   if (!free_list_only) {
@@ -4544,14 +4567,14 @@
 
 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
   bool expanded = false;
-  uint index = _hrm.find_highest_free(&expanded);
+  uint index = _hrm->find_highest_free(&expanded);
 
   if (index != G1_NO_HRM_INDEX) {
     if (expanded) {
       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
                                 HeapRegion::GrainWords * HeapWordSize);
     }
-    _hrm.allocate_free_regions_starting_at(index, 1);
+    _hrm->allocate_free_regions_starting_at(index, 1);
     return region_at(index);
   }
   return NULL;
 }
--- old/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	2018-10-05 11:09:04.090325500 -0700
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	2018-10-05 11:09:03.201643500 -0700
@@ -43,6 +43,7 @@
 #include "gc/g1/g1SurvivorRegions.hpp"
 #include "gc/g1/g1YCTypes.hpp"
 #include "gc/g1/heapRegionManager.hpp"
+#include "gc/g1/heapRegionManagerForHeteroHeap.hpp"
 #include "gc/g1/heapRegionSet.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/collectedHeap.hpp"
@@ -194,7 +195,10 @@
   G1RegionMappingChangedListener _listener;
 
   // The sequence of all heap regions in the heap.
-  HeapRegionManager _hrm;
+  HeapRegionManager* _hrm;
+
+  // Is the heap backed by heterogeneous memory?
+  bool _is_hetero_heap;
 
   // Manages all allocations with regions except humongous object allocations.
   G1Allocator* _allocator;
@@ -953,6 +957,10 @@
   // The current policy object for the collector.
   G1Policy* g1_policy() const { return _g1_policy; }
 
+  HeapRegionManager* hrm() const { return _hrm; }
+
+  bool is_hetero_heap() const { return _is_hetero_heap; }
+
   const G1CollectionSet* collection_set() const { return &_collection_set; }
   G1CollectionSet* collection_set() { return &_collection_set; }
 
@@ -1005,7 +1013,7 @@
   // But G1CollectedHeap doesn't yet support this.
 
   virtual bool is_maximal_no_gc() const {
-    return _hrm.available() == 0;
+    return _hrm->available() == 0;
   }
 
   // Returns whether there are any regions left in the heap for allocation.
@@ -1014,16 +1022,19 @@
   }
 
   // The current number of regions in the heap.
-  uint num_regions() const { return _hrm.length(); }
+  uint num_regions() const { return _hrm->length(); }
 
   // The max number of regions in the heap.
-  uint max_regions() const { return _hrm.max_length(); }
+  uint max_regions() const { return _hrm->max_length(); }
+
+  // Max number of regions that can be committed.
+  uint max_expandable_regions() const { return _hrm->max_expandable_length(); }
 
   // The number of regions that are completely free.
-  uint num_free_regions() const { return _hrm.num_free_regions(); }
+  uint num_free_regions() const { return _hrm->num_free_regions(); }
 
   MemoryUsage get_auxiliary_data_memory_usage() const {
-    return _hrm.get_auxiliary_data_memory_usage();
+    return _hrm->get_auxiliary_data_memory_usage();
   }
 
   // The number of regions that are not completely free.
@@ -1031,7 +1042,7 @@
 
 #ifdef ASSERT
   bool is_on_master_free_list(HeapRegion* hr) {
-    return _hrm.is_free(hr);
+    return _hrm->is_free(hr);
   }
 #endif // ASSERT
 
@@ -1088,13 +1099,13 @@
   // Return "TRUE" iff the given object address is in the reserved
   // region of g1.
   bool is_in_g1_reserved(const void* p) const {
-    return _hrm.reserved().contains(p);
+    return _hrm->reserved().contains(p);
   }
 
   // Returns a MemRegion that corresponds to the space that has been
   // reserved for the heap
   MemRegion g1_reserved() const {
-    return _hrm.reserved();
+    return _hrm->reserved();
   }
 
   virtual bool is_in_closed_subset(const void* p) const;
@@ -1214,6 +1225,9 @@
   // Print the maximum heap capacity.
   virtual size_t max_capacity() const;
 
+  // Return the size of reserved memory. Returns a different value from max_capacity() when AllocateOldGenAt is used.
+  virtual size_t max_reserved_capacity() const;
+
   virtual jlong millis_since_last_gc();
 
--- old/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	2018-10-05 11:09:12.293863300 -0700
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	2018-10-05 11:09:11.451473100 -0700
@@ -58,10 +58,10 @@
 // Inline functions for G1CollectedHeap
 
 // Return the region with the given index. It assumes the index is valid.
-inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
+inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm->at(index); }
 
 inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
-  return _hrm.next_region_in_humongous(hr);
+  return _hrm->next_region_in_humongous(hr);
 }
 
 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
@@ -72,7 +72,7 @@
 }
 
 inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
-  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
+  return _hrm->reserved().start() + index * HeapRegion::GrainWords;
 }
 
 template <class T>
@@ -81,7 +81,7 @@
   assert(is_in_g1_reserved((const void*) addr),
          "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
          p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
-  return _hrm.addr_to_region((HeapWord*) addr);
+  return _hrm->addr_to_region((HeapWord*) addr);
 }
 
 inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
@@ -254,12 +254,12 @@
 }
 
 inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
-  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
+  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
   _humongous_reclaim_candidates.set_candidate(region, value);
 }
 
 inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
-  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
+  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
   return _humongous_reclaim_candidates.is_candidate(region);
 }
 
--- old/src/hotspot/share/gc/g1/g1FullCollector.cpp	2018-10-05 11:09:20.422175700 -0700
+++ new/src/hotspot/share/gc/g1/g1FullCollector.cpp	2018-10-05 11:09:19.573368600 -0700
@@ -165,6 +165,11 @@
 }
 
 void G1FullCollector::collect() {
+
+  if (_heap->is_hetero_heap()) {
+    static_cast<HeapRegionManagerForHeteroHeap*>(_heap->_hrm)->prepare_for_full_collection_start();
+  }
+
   phase1_mark_live_objects();
   verify_after_marking();
 
@@ -176,6 +181,10 @@
 
   phase3_adjust_pointers();
 
   phase4_do_compaction();
+
+  if (_heap->is_hetero_heap()) {
+    static_cast<HeapRegionManagerForHeteroHeap*>(_heap->_hrm)->prepare_for_full_collection_end();
+  }
 }
 
 void G1FullCollector::complete_collection() {
--- old/src/hotspot/share/gc/g1/g1HeapRegionTraceType.hpp	2018-10-05 11:09:28.555194500 -0700
+++ new/src/hotspot/share/gc/g1/g1HeapRegionTraceType.hpp	2018-10-05 11:09:27.693392800 -0700
@@ -37,6 +37,7 @@
     StartsHumongous,
     ContinuesHumongous,
     Old,
+    PreMatureOld,
    OpenArchive,
     ClosedArchive,
     G1HeapRegionTypeEndSentinel
@@ -50,6 +51,7 @@
     case StartsHumongous:    return "Starts Humongous";
     case ContinuesHumongous: return "Continues Humongous";
     case Old:                return "Old";
+    case PreMatureOld:       return "PreMatureOld";
    case OpenArchive:        return "OpenArchive";
     case ClosedArchive:      return "ClosedArchive";
     default: ShouldNotReachHere(); return NULL;
--- old/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	2018-10-05 11:09:37.047758400 -0700
+++ new/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	2018-10-05 11:09:36.152345100 -0700
@@ -541,14 +541,14 @@
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
 
   // First, check the explicit lists.
-  _g1h->_hrm.verify();
+  _g1h->_hrm->verify();
 
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
-  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
+  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm);
   _g1h->heap_region_iterate(&cl);
 
-  cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
+  cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm);
 }
 
 void G1HeapVerifier::prepare_for_verify() {
@@ -789,7 +789,7 @@
 
 bool G1HeapVerifier::check_cset_fast_test() {
   G1CheckCSetFastTableClosure cl;
-  _g1h->_hrm.iterate(&cl);
+  _g1h->_hrm->iterate(&cl);
   return !cl.failures();
 }
 #endif // PRODUCT
--- old/src/hotspot/share/gc/g1/g1Policy.cpp	2018-10-05 11:09:45.321163400 -0700
+++ new/src/hotspot/share/gc/g1/g1Policy.cpp	2018-10-05 11:09:44.430248900 -0700
@@ -96,7 +96,7 @@
   if (!adaptive_young_list_length()) {
     _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
   }
-  _young_gen_sizer.adjust_max_new_size(_g1h->max_regions());
+  _young_gen_sizer.adjust_max_new_size(_g1h->max_expandable_regions());
 
   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 
@@ -218,6 +218,11 @@
 uint G1Policy::update_young_list_target_length(size_t rs_lengths) {
   YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
   _young_list_target_length = young_lengths.first;
+
+  // Resize the dram region set if called after a full collection ends.
+  if (_g1h->is_hetero_heap() && (Thread::current()->is_VM_thread() || Heap_lock->owned_by_self())) {
+    static_cast<HeapRegionManagerForHeteroHeap*>(_g1h->hrm())->resize_dram_regions(_young_list_target_length, _g1h->workers());
+  }
 
   return young_lengths.second;
 }
--- old/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp	2018-10-05 11:09:53.620712700 -0700
+++ new/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp	2018-10-05 11:09:52.735661600 -0700
@@ -170,13 +170,143 @@
   }
 }
 
+void G1RegionToHeteroSpaceMapper::map_nvdimm_space(ReservedSpace rs) {
+  assert(AllocateOldGenAt != NULL, "");
+  int _backing_fd = os::create_file_for_heap(AllocateOldGenAt);
+  if (_backing_fd == -1) {
+    vm_exit_during_initialization(
+      err_msg("Could not create file for Old generation at location %s", AllocateOldGenAt));
+  }
+  // commit this memory in nv-dimm
+  char* ret = os::attempt_reserve_memory_at(rs.size(), rs.base(), _backing_fd);
+  //char* ret = os::replace_existing_mapping_with_file_mapping(rs.base(), rs.size(), _backing_fd);
+  if (ret != rs.base()) {
+    if (ret != NULL) {
+      os::unmap_memory(rs.base(), rs.size());
+    }
+    vm_exit_during_initialization(
+      err_msg("Error in mapping Old Gen to given AllocateOldGenAt = %s", AllocateOldGenAt));
+  }
+}
+
+G1RegionToHeteroSpaceMapper::G1RegionToHeteroSpaceMapper(ReservedSpace rs,
+                                                         size_t actual_size,
+                                                         size_t page_size,
+                                                         size_t alloc_granularity,
+                                                         size_t commit_factor,
+                                                         MemoryType type) :
+  G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
+  _num_committed_dram(0), _num_committed_nvdimm(0) {
+  assert(actual_size == 2 * MaxHeapSize, "For 2-way heterogeneous heap, reserved space is two times MaxHeapSize");
+
+  // Since we need to split the reserved space in half and map the second half to a file on NV-DIMM,
+  // we need to release the reserved memory first, because on some OSes (e.g. Windows) you cannot
+  // do a file mapping on memory reserved with a regular mapping.
+  os::release_memory(rs.base(), rs.size());
+
+  /* Toggle HeteroHeap
+  // We map first part of size Xmx to DRAM.
+  ReservedSpace rs_dram = rs.first_part(MaxHeapSize);
+  // Second half of reserved memory is mapped to NV-DIMM.
+  ReservedSpace rs_nvdimm = rs.last_part(MaxHeapSize);*/
+  // We map the first part of size Xmx to NV-DIMM.
+  ReservedSpace rs_nvdimm = rs.first_part(MaxHeapSize);
+  // The second half of reserved memory is mapped to DRAM.
+  ReservedSpace rs_dram = rs.last_part(MaxHeapSize);
+  assert(rs_dram.size() == rs_nvdimm.size() && rs_nvdimm.size() == MaxHeapSize, "They all should be same");
+
+  // Reserve dram memory
+  char* base = os::attempt_reserve_memory_at(rs_dram.size(), rs_dram.base());
+  if (base != rs_dram.base()) {
+    if (base != NULL) {
+      os::release_memory(base, rs_dram.size());
+    }
+    vm_exit_during_initialization(err_msg("Error in allocating heap"));
+  }
+
+  // We reserve and commit this entire space to NV-DIMM.
+  map_nvdimm_space(rs_nvdimm);
+
+  if (alloc_granularity >= (page_size * commit_factor)) {
+    _dram_mapper = new G1RegionsLargerThanCommitSizeMapper(rs_dram, rs_dram.size(), page_size, alloc_granularity, commit_factor, type);
+  } else {
+    _dram_mapper = new G1RegionsSmallerThanCommitSizeMapper(rs_dram, rs_dram.size(), page_size, alloc_granularity, commit_factor, type);
+  }
+
+  /* Toggle HeteroHeap
+  _start_index_of_nvdimm = (uint)(rs_dram.size() / alloc_granularity);
+  _start_index_of_dram = 0; */
+  _start_index_of_nvdimm = 0;
+  _start_index_of_dram = (uint)(rs_nvdimm.size() / alloc_granularity);
+}
+
+void G1RegionToHeteroSpaceMapper::commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
+  uint end_idx = (start_idx + (uint)num_regions - 1);
+
+  /* Toggle HeteroHeap
+  uint num_nvdimm = end_idx >= _start_index_of_nvdimm ? MIN2((end_idx - _start_index_of_nvdimm + 1), (uint)num_regions) : 0;
+  uint num_dram = (uint)num_regions - num_nvdimm;*/
+  uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
+  uint num_nvdimm = (uint)num_regions - num_dram;
+
+  if (num_nvdimm > 0) {
+    // We do not need to commit nv-dimm regions, since they are committed in the beginning.
+    _num_committed_nvdimm += num_nvdimm;
+  }
+  if (num_dram > 0) {
+    /* Toggle HeteroHeap
+    _dram_mapper->commit_regions(start_idx, num_dram, pretouch_gang); */
+    _dram_mapper->commit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram, pretouch_gang);
+    _num_committed_dram += num_dram;
+  }
+}
+
+void G1RegionToHeteroSpaceMapper::uncommit_regions(uint start_idx, size_t num_regions) {
+  uint end_idx = (start_idx + (uint)num_regions - 1);
+  /* Toggle HeteroHeap
+  uint num_nvdimm = end_idx >= _start_index_of_nvdimm ? MIN2((end_idx - _start_index_of_nvdimm + 1), (uint)num_regions) : 0;
+  uint num_dram = (uint)num_regions - num_nvdimm;*/
+  uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
+  uint num_nvdimm = (uint)num_regions - num_dram;
+
+  if (num_nvdimm > 0) {
+    // We do not uncommit memory for nv-dimm regions.
+    _num_committed_nvdimm -= num_nvdimm;
+  }
+
+  if (num_dram > 0) {
+    _dram_mapper->uncommit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram);
+    _num_committed_dram -= num_dram;
+  }
+}
+
+uint G1RegionToHeteroSpaceMapper::num_committed_dram() {
+  return _num_committed_dram;
+}
+
+uint G1RegionToHeteroSpaceMapper::num_committed_nvdimm() {
+  return _num_committed_nvdimm;
+}
+
+G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_heap_mapper(ReservedSpace rs,
+                                                                 size_t actual_size,
+                                                                 size_t page_size,
+                                                                 size_t region_granularity,
+                                                                 size_t commit_factor,
+                                                                 MemoryType type) {
+  if (AllocateOldGenAt != NULL) {
+    return new G1RegionToHeteroSpaceMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
+  } else {
+    return create_mapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
+  }
+}
+
 G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
                                                             size_t actual_size,
                                                             size_t page_size,
                                                             size_t region_granularity,
                                                             size_t commit_factor,
                                                             MemoryType type) {
-
   if (region_granularity >= (page_size * commit_factor)) {
     return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
   } else {
--- old/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp	2018-10-05 11:10:01.801057300 -0700
+++ new/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp	2018-10-05 11:10:00.922833800 -0700
@@ -87,6 +87,34 @@
                                               size_t region_granularity,
                                               size_t byte_translation_factor,
                                               MemoryType type);
+
+  static G1RegionToSpaceMapper* create_heap_mapper(ReservedSpace rs,
+                                                   size_t actual_size,
+                                                   size_t page_size,
+                                                   size_t region_granularity,
+                                                   size_t byte_translation_factor,
+                                                   MemoryType type);
 };
 
+// G1RegionToSpaceMapper implementation where
+// part of the space is mapped to dram and part to nv-dimm.
+class G1RegionToHeteroSpaceMapper : public G1RegionToSpaceMapper {
+private:
+  size_t _pages_per_region;
+  G1RegionToSpaceMapper* _dram_mapper;
+  uint _num_committed_dram;
+  uint _num_committed_nvdimm;
+  uint _start_index_of_nvdimm;
+  uint _start_index_of_dram;
+
+  void map_nvdimm_space(ReservedSpace rs);
+
+public:
+  G1RegionToHeteroSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, size_t commit_factor, MemoryType type);
+  uint num_committed_dram();
+  uint num_committed_nvdimm();
+
+  virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkGang* pretouch_workers = NULL);
+  virtual void uncommit_regions(uint start_idx, size_t num_regions = 1);
+};
 #endif // SHARE_VM_GC_G1_G1REGIONTOSPACEMAPPER_HPP
--- old/src/hotspot/share/gc/g1/heapRegion.cpp	2018-10-05 11:10:10.065375100 -0700
+++ new/src/hotspot/share/gc/g1/heapRegion.cpp	2018-10-05 11:10:09.201116700 -0700
@@ -184,6 +184,11 @@
   _type.set_old();
 }
 
+void HeapRegion::set_premature_old() {
+  report_region_type_change(G1HeapRegionTraceType::Old);
+  _type.set_premature_old();
+}
+
 void HeapRegion::set_open_archive() {
   report_region_type_change(G1HeapRegionTraceType::OpenArchive);
   _type.set_open_archive();
--- old/src/hotspot/share/gc/g1/heapRegion.hpp	2018-10-05 11:10:18.326081000 -0700
+++ new/src/hotspot/share/gc/g1/heapRegion.hpp	2018-10-05 11:10:17.443733200 -0700
@@ -424,6 +424,8 @@
 
   bool is_old() const { return _type.is_old(); }
 
+  bool is_premature_old() const { return _type.is_premature_old(); }
+
   bool is_old_or_humongous() const { return _type.is_old_or_humongous(); }
 
   bool is_old_or_humongous_or_archive() const { return _type.is_old_or_humongous_or_archive(); }
@@ -613,6 +615,7 @@
 
   void move_to_old();
   void set_old();
+  void set_premature_old();
   void set_open_archive();
   void set_closed_archive();
--- old/src/hotspot/share/gc/g1/heapRegionManager.cpp	2018-10-05 11:10:26.743212500 -0700
+++ new/src/hotspot/share/gc/g1/heapRegionManager.cpp	2018-10-05 11:10:25.846761400 -0700
@@ -513,7 +513,7 @@
 #endif // PRODUCT
 
 HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
-    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
+    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm->_allocated_heapregions_length), _claims(NULL) {
   assert(n_workers > 0, "Need at least one worker.");
   uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
   memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);
--- old/src/hotspot/share/gc/g1/heapRegionManager.hpp	2018-10-05 11:10:34.958862100 -0700
+++ new/src/hotspot/share/gc/g1/heapRegionManager.hpp	2018-10-05 11:10:34.092875700 -0700
@@ -71,16 +71,19 @@
   friend class VMStructs;
   friend class HeapRegionClaimer;
 
+ protected:
   G1HeapRegionTable _regions;
-
   G1RegionToSpaceMapper* _heap_mapper;
+
+ private:
   G1RegionToSpaceMapper* _prev_bitmap_mapper;
   G1RegionToSpaceMapper* _next_bitmap_mapper;
   G1RegionToSpaceMapper* _bot_mapper;
   G1RegionToSpaceMapper* _cardtable_mapper;
   G1RegionToSpaceMapper* _card_counts_mapper;
 
+ protected:
   FreeRegionList _free_list;
+ private:
 
   // Each bit in this bitmap indicates that the corresponding region is available
   // for allocation.
@@ -95,11 +98,12 @@
   HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
   HeapWord* heap_end() const {return _regions.end_address_mapped(); }
 
+ protected:
   void make_regions_available(uint index, uint num_regions = 1, WorkGang* pretouch_gang = NULL);
-
+  void uncommit_regions(uint index, size_t num_regions = 1);
+ private:
   // Pass down commit calls to the VirtualSpace.
   void commit_regions(uint index, size_t num_regions = 1, WorkGang* pretouch_gang = NULL);
-  void uncommit_regions(uint index, size_t num_regions = 1);
 
   // Notify other data structures about change in the heap layout.
   void update_committed_space(HeapWord* old_end, HeapWord* new_end);
@@ -117,14 +121,17 @@
   // the heap. Returns the length of the sequence found. If this value is zero, no
   // sequence could be found, otherwise res_idx contains the start index of this range.
   uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
-  // Allocate a new HeapRegion for the given index.
-  HeapRegion* new_heap_region(uint hrm_index);
 #ifdef ASSERT
 public:
   bool is_free(HeapRegion* hr) const;
-#endif
+  bool is_available(uint region) const;
+#else
   // Returns whether the given region is available for allocation.
+protected:
   bool is_available(uint region) const;
+#endif
+  // Allocate a new HeapRegion for the given index.
+  HeapRegion* new_heap_region(uint hrm_index);
 public:
   // Empty constructor, we'll initialize it with the initialize() method.
@@ -141,7 +148,7 @@
   // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
   // the heap from the lowest address, this region (and its associated data
   // structures) are available and we do not need to check further.
-  HeapRegion* get_dummy_region() { return new_heap_region(0); }
+  virtual HeapRegion* get_dummy_region() { return new_heap_region(0); }
 
   // Return the HeapRegion at the given index. Assume that the index
   // is valid.
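The accessibility and virtual-method changes above exist so that HeapRegionManagerForHeteroHeap (the new file at the end of this patch) can override region allocation while reusing the base class's single free list. A compilable toy model of that override pattern, under the simplification that one index-ordered free list serves both memory kinds (std::deque stands in for FreeRegionList; all names are illustrative):

#include <cstdio>
#include <deque>

// Toy model: the base manager hands out any free region, while the hetero
// subclass keeps the free list ordered by index so that old regions come
// from the NV-DIMM end and young regions from the DRAM end.
struct ToyManager {
  std::deque<unsigned> free_list;  // region indexes, kept sorted ascending
  virtual ~ToyManager() {}
  virtual unsigned allocate_free_region(bool /*is_old*/) {
    unsigned r = free_list.front();
    free_list.pop_front();
    return r;
  }
};

struct ToyHeteroManager : ToyManager {
  unsigned dram_start;  // indexes below this stand for NV-DIMM, the rest for DRAM
  explicit ToyHeteroManager(unsigned dram_start) : dram_start(dram_start) {}
  virtual unsigned allocate_free_region(bool is_old) {
    // Old regions from the head (low indexes, NV-DIMM),
    // young regions from the tail (high indexes, DRAM).
    unsigned r;
    if (is_old) {
      r = free_list.front();
      free_list.pop_front();
    } else {
      r = free_list.back();
      free_list.pop_back();
    }
    return r;
  }
};

int main() {
  ToyHeteroManager m(4);  // regions 0..3 model NV-DIMM, 4..7 model DRAM
  for (unsigned i = 0; i < 8; i++) m.free_list.push_back(i);
  printf("old -> %u, young -> %u\n",
         m.allocate_free_region(true), m.allocate_free_region(false));
  return 0;
}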
@@ -163,7 +170,7 @@ _free_list.add_ordered(list); } - HeapRegion* allocate_free_region(bool is_old) { + virtual HeapRegion* allocate_free_region(bool is_old) { HeapRegion* hr = _free_list.remove_region(is_old); if (hr != NULL) { @@ -197,6 +204,9 @@ // Return the maximum number of regions in the heap. uint max_length() const { return (uint)_regions.length(); } + + // Return maximum number of regions that heap can expand to. + virtual uint max_expandable_length() const { return (uint)_regions.length(); } MemoryUsage get_auxiliary_data_memory_usage() const; @@ -206,26 +216,26 @@ // HeapRegions, or re-use existing ones. Returns the number of regions the // sequence was expanded by. If a HeapRegion allocation fails, the resulting // number of regions might be smaller than what's desired. - uint expand_by(uint num_regions, WorkGang* pretouch_workers); + virtual uint expand_by(uint num_regions, WorkGang* pretouch_workers); // Makes sure that the regions from start to start+num_regions-1 are available // for allocation. Returns the number of regions that were committed to achieve // this. - uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers); + virtual uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers); // Find a contiguous set of empty regions of length num. Returns the start index of // that set, or G1_NO_HRM_INDEX. - uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); } + virtual uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); } // Find a contiguous set of empty or unavailable regions of length num. Returns the // start index of that set, or G1_NO_HRM_INDEX. - uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); } + virtual uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); } HeapRegion* next_region_in_heap(const HeapRegion* r) const; // Find the highest free or uncommitted region in the reserved heap, // and if uncommitted, commit it. If none are available, return G1_NO_HRM_INDEX. // Set the 'expanded' boolean true if a new region was committed. - uint find_highest_free(bool* expanded); + virtual uint find_highest_free(bool* expanded); // Allocate the regions that contain the address range specified, committing the // regions if necessary. Return false if any of the regions is already committed @@ -240,13 +250,13 @@ // Uncommit up to num_regions_to_remove regions that are completely free. // Return the actual number of uncommitted regions. - uint shrink_by(uint num_regions_to_remove); + virtual uint shrink_by(uint num_regions_to_remove); // Uncommit a number of regions starting at the specified index, which must be available, // empty, and free. void shrink_at(uint index, size_t num_regions); - void verify(); + virtual void verify(); // Do some sanity checking. 
void verify_optional() PRODUCT_RETURN; --- old/src/hotspot/share/gc/g1/heapRegionSet.cpp 2018-10-05 11:10:43.130509400 -0700 +++ new/src/hotspot/share/gc/g1/heapRegionSet.cpp 2018-10-05 11:10:42.217153000 -0700 @@ -234,6 +234,22 @@ verify_optional(); } +uint FreeRegionList::num_of_regions_in_range(uint start, uint end) const { + HeapRegion* cur = _head; + uint num = 0; + bool started = false; + while (cur != NULL && cur->hrm_index() <= end) { + if (!started && cur->hrm_index() >= start) { + started = true; + } + if(started) { + num++; + } + cur = cur->next(); + } + return num; +} + void FreeRegionList::verify() { // See comment in HeapRegionSetBase::verify() about MT safety and // verification. --- old/src/hotspot/share/gc/g1/heapRegionSet.hpp 2018-10-05 11:10:51.242085400 -0700 +++ new/src/hotspot/share/gc/g1/heapRegionSet.hpp 2018-10-05 11:10:50.370635200 -0700 @@ -194,6 +194,8 @@ void remove_starting_at(HeapRegion* first, uint num_regions); virtual void verify(); + + uint num_of_regions_in_range(uint start, uint end) const; }; // Iterator class that provides a convenient way to iterate over the --- old/src/hotspot/share/gc/g1/heapRegionType.cpp 2018-10-05 11:10:59.334576900 -0700 +++ new/src/hotspot/share/gc/g1/heapRegionType.cpp 2018-10-05 11:10:58.492961900 -0700 @@ -34,6 +34,7 @@ case StartsHumongousTag: case ContinuesHumongousTag: case OldTag: + case PreMatureOldTag: case OpenArchiveTag: case ClosedArchiveTag: return true; @@ -51,6 +52,7 @@ case StartsHumongousTag: return "HUMS"; case ContinuesHumongousTag: return "HUMC"; case OldTag: return "OLD"; + case PreMatureOldTag: return "PMOLD"; case OpenArchiveTag: return "OARC"; case ClosedArchiveTag: return "CARC"; default: @@ -68,6 +70,7 @@ case StartsHumongousTag: return "HS"; case ContinuesHumongousTag: return "HC"; case OldTag: return "O"; + case PreMatureOldTag: return "PO"; case OpenArchiveTag: return "OA"; case ClosedArchiveTag: return "CA"; default: @@ -85,6 +88,7 @@ case StartsHumongousTag: return G1HeapRegionTraceType::StartsHumongous; case ContinuesHumongousTag: return G1HeapRegionTraceType::ContinuesHumongous; case OldTag: return G1HeapRegionTraceType::Old; + case PreMatureOldTag: return G1HeapRegionTraceType::PreMatureOld; case OpenArchiveTag: return G1HeapRegionTraceType::OpenArchive; case ClosedArchiveTag: return G1HeapRegionTraceType::ClosedArchive; default: --- old/src/hotspot/share/gc/g1/heapRegionType.hpp 2018-10-05 11:11:07.555645800 -0700 +++ new/src/hotspot/share/gc/g1/heapRegionType.hpp 2018-10-05 11:11:06.653946600 -0700 @@ -74,8 +74,9 @@ ContinuesHumongousTag = HumongousMask | PinnedMask + 1, OldMask = 16, + PreMatureOldMask = OldMask + 1, OldTag = OldMask, - + PreMatureOldTag = PreMatureOldMask, // Archive regions are regions with immutable content (i.e. not reclaimed, and // not allocated into during regular operation). They differ in the kind of references // allowed for the contained objects: @@ -137,6 +138,8 @@ // is_old regions may or may not also be pinned bool is_old() const { return (get() & OldMask) != 0; } + bool is_premature_old() const { return get() == PreMatureOldTag; } + bool is_old_or_humongous() const { return (get() & (OldMask | HumongousMask)) != 0; } bool is_old_or_humongous_or_archive() const { return (get() & (OldMask | HumongousMask | ArchiveMask)) != 0; } @@ -157,6 +160,8 @@ void set_old() { set(OldTag); } + void set_premature_old() { set(PreMatureOldTag); } + // Change the current region type to be of an old region type if not already done so. 
// Returns whether the region type has been changed or not. bool relabel_as_old() { --- old/src/hotspot/share/gc/g1/vmStructs_g1.hpp 2018-10-05 11:11:16.095894800 -0700 +++ new/src/hotspot/share/gc/g1/vmStructs_g1.hpp 2018-10-05 11:11:15.237747800 -0700 @@ -53,7 +53,7 @@ nonstatic_field(HeapRegionManager, _num_committed, uint) \ \ nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \ - nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager) \ + nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager*) \ nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \ nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \ nonstatic_field(G1CollectedHeap, _archive_set, HeapRegionSetBase) \ --- old/src/hotspot/share/runtime/arguments.cpp 2018-10-05 11:11:24.295987700 -0700 +++ new/src/hotspot/share/runtime/arguments.cpp 2018-10-05 11:11:23.448935300 -0700 @@ -2068,6 +2068,19 @@ log_warning(arguments) ("NUMA support for Heap depends on the file system when AllocateHeapAt option is used.\n"); } } + + if(!FLAG_IS_DEFAULT(AllocateHeapAt) && !FLAG_IS_DEFAULT(AllocateOldGenAt)) { + jio_fprintf(defaultStream::error_stream(), + "AllocateHeapAt and AllocateOldGenAt cannot be used together.\n"); + status = false; + } + + if (!FLAG_IS_DEFAULT(AllocateOldGenAt) && (UseSerialGC || UseConcMarkSweepGC || UseEpsilonGC || UseZGC)) { + jio_fprintf(defaultStream::error_stream(), + "AllocateOldGenAt not supported for selected GC.\n"); + status = false; + } + return status; } --- old/src/hotspot/share/runtime/globals.hpp 2018-10-05 11:11:32.475635700 -0700 +++ new/src/hotspot/share/runtime/globals.hpp 2018-10-05 11:11:31.629531900 -0700 @@ -2604,7 +2604,18 @@ "Start flight recording with options")) \ \ experimental(bool, UseFastUnorderedTimeStamps, false, \ - "Use platform unstable time where supported for timestamps only") + "Use platform unstable time where supported for timestamps only") \ + \ + experimental(ccstr, AllocateOldGenAt, NULL, \ + "Directory to use for allocating old generation") \ + \ + experimental(uintx, G1YoungExpansionBufferPerc, 10, \ + "When heterogenous heap is enabled by AllocateOldGenAt " \ + "option, after every GC, yg gen is re-sized which involves " \ + "system calls to commit/uncommit memory. To reduce these " \ + "calls, we keep a buffer of extra regions to absorb small " \ + "changes in yg gen length. This flag takes the buffer " \ + "size as an percentage of young gen length") #define VM_FLAGS(develop, \ develop_pd, \ --- /dev/null 2018-10-05 11:11:41.000000000 -0700 +++ new/src/hotspot/share/gc/g1/heapRegionManagerForHeteroHeap.cpp 2018-10-05 11:11:39.743496700 -0700 @@ -0,0 +1,446 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/g1/g1CollectedHeap.inline.hpp" +#include "gc/g1/g1ConcurrentRefine.hpp" +#include "gc/g1/heapRegion.hpp" +#include "gc/g1/heapRegionManager.inline.hpp" +#include "gc/g1/heapRegionManagerForHeteroHeap.hpp" +#include "gc/g1/heapRegionSet.inline.hpp" +#include "memory/allocation.hpp" + +// expand_by() is called to grow the heap. We grow into nvdimm now. +// Dram regions are committed later as needed during mutator region allocation or +// when young list target length is determined after gc cycle. +uint HeapRegionManagerForHeteroHeap::expand_by(uint num_regions, WorkGang* pretouch_workers) { + uint num_expanded = expand_nvdimm(MIN2(num_regions, max_expandable_length() - total_regions_committed()), pretouch_workers); + assert(total_regions_committed() <= max_expandable_length(), "must be"); + return num_expanded; +} + +// Expands heap starting from 'start' index. The question is should we expand from one memory (e.g. nvdimm) to another (e.g. dram). +// Looking at the code, expand_at() is called for humongous allocation where 'start' is in nv-dimm. +// So we only allocate regions in the same kind of memory as 'start'. +uint HeapRegionManagerForHeteroHeap::expand_at(uint start, uint num_regions, WorkGang* pretouch_workers) { + if (num_regions == 0) { + return 0; + } + num_regions = MIN2(num_regions, max_expandable_length() - total_regions_committed()); + uint end = is_in_nvdimm(start) ? end_index_of_nvdimm() : end_index_of_dram(); + uint num_expanded = expand_in_range(start, end, num_regions, pretouch_workers); + assert(total_regions_committed() <= max_expandable_length(), "must be"); + return num_expanded; +} + +// This function ensures that there are 'expected_num_regions' committed regions in dram. +// If new regions are committed, it un-commits that many regions from nv-dimm. +// If there are already more regions committed in dram, extra regions are un-committed. +void HeapRegionManagerForHeteroHeap::resize_dram_regions(uint expected_num_regions, WorkGang* pretouch_workers) { + + assert(total_regions_committed() <= max_expandable_length(), "must be"); + if(expected_num_regions > free_list_dram_length()) { + // If we are going to expand DRAM, we expand a little more so that we can absorb small variations in Young gen sizing. + uint targeted_dram_regions = expected_num_regions * (1 + (double)G1YoungExpansionBufferPerc / 100); + uint to_be_made_available = targeted_dram_regions - free_list_dram_length(); + +#ifdef ASSERT + uint total_committed_before = total_regions_committed(); +#endif + uint can_be_made_available = shrink_nvdimm(to_be_made_available); + uint ret = expand_dram(can_be_made_available, pretouch_workers); +#ifdef ASSERT + assert(ret == can_be_made_available, "should be equal"); + assert(total_committed_before == total_regions_committed(), "invariant not met"); + assert(total_regions_committed() <= _max_regions, "post-condition"); +#endif + } else { + uint to_be_released = free_list_dram_length() - expected_num_regions; + // if number of extra DRAM regions is small, do not shrink. 
+ if (to_be_released < expected_num_regions * G1YoungExpansionBufferPerc / 100) { + return; + } + +#ifdef ASSERT + uint total_committed_before = total_regions_committed(); +#endif + uint ret = shrink_dram(to_be_released); + assert(ret == to_be_released, "Should be able to shrink by given amount"); + ret = expand_nvdimm(to_be_released, pretouch_workers); +#ifdef ASSERT + assert(ret == to_be_released, "Should be able to expand by given amount"); + assert(total_committed_before == total_regions_committed(), "invariant not met"); + assert(total_regions_committed() <= _max_regions, "post-condition"); +#endif + } + assert(total_regions_committed() <= max_expandable_length(), "must be"); +} + +uint HeapRegionManagerForHeteroHeap::total_regions_committed() const { + return num_committed_dram() + num_committed_nvdimm(); +} + +uint HeapRegionManagerForHeteroHeap::num_committed_dram() const { + // This class does not keep count of committed regions in dram and nv-dimm. + // G1RegionToHeteroSpaceMapper keeps this information. + return static_cast(_heap_mapper)->num_committed_dram(); +} + +uint HeapRegionManagerForHeteroHeap::num_committed_nvdimm() const { + // See comment for num_committed_dram() + return static_cast(_heap_mapper)->num_committed_nvdimm(); +} + +// Return maximum number of regions that heap can expand to. +uint HeapRegionManagerForHeteroHeap::max_expandable_length() const { + return _max_regions; +} + +uint HeapRegionManagerForHeteroHeap::find_unavailable_in_range(uint start_idx, uint end_idx, uint* res_idx) const { + guarantee(res_idx != NULL, "checking"); + guarantee(start_idx <= (max_length() + 1), "checking"); + + uint num_regions = 0; + + uint cur = start_idx; + while (cur <= end_idx && is_available(cur)) { + cur++; + } + if (cur == end_idx + 1) { + return num_regions; + } + *res_idx = cur; + while (cur <= end_idx && !is_available(cur)) { + cur++; + } + num_regions = cur - *res_idx; + +#ifdef ASSERT + for (uint i = *res_idx; i < (*res_idx + num_regions); i++) { + assert(!is_available(i), "just checking"); + } + assert(cur == end_idx + 1 || num_regions == 0 || is_available(cur), + "The region at the current position %u must be available or at the end", cur); +#endif + return num_regions; +} + +uint HeapRegionManagerForHeteroHeap::expand_dram(uint num_regions, WorkGang* pretouch_workers) { + return expand_in_range(start_index_of_dram(), end_index_of_dram(), num_regions, pretouch_workers); +} + +uint HeapRegionManagerForHeteroHeap::expand_nvdimm(uint num_regions, WorkGang* pretouch_workers) { + return expand_in_range(start_index_of_nvdimm(), end_index_of_nvdimm(), num_regions, pretouch_workers); +} + +// Follows same logic as expand_at() form HeapRegionManager. +uint HeapRegionManagerForHeteroHeap::expand_in_range(uint start, uint end, uint num_regions, WorkGang* pretouch_gang) { + + uint so_far = 0; + uint chunk_start = 0; + uint num_last_found = 0; + while (so_far < num_regions && + (num_last_found = find_unavailable_in_range(start, end, &chunk_start)) > 0) { + uint to_commit = MIN2(num_regions - so_far, num_last_found); + make_regions_available(chunk_start, to_commit, pretouch_gang); + so_far += to_commit; + start = chunk_start + to_commit + 1; + } + + return so_far; +} + +// Shrink in the range of indexes which are reserved for dram. 
+uint HeapRegionManagerForHeteroHeap::shrink_dram(uint num_regions, bool update_free_list) { + return shrink_in_range(start_index_of_dram(), end_index_of_dram(), num_regions, update_free_list); +} + +// Shrink in the range of indexes which are reserved for nv-dimm. +uint HeapRegionManagerForHeteroHeap::shrink_nvdimm(uint num_regions, bool update_free_list) { + return shrink_in_range(start_index_of_nvdimm(), end_index_of_nvdimm(), num_regions, update_free_list); +} + +// Find empty regions in given range, un-commit them and return the count. +uint HeapRegionManagerForHeteroHeap::shrink_in_range(uint start, uint end, uint num_regions, bool update_free_list) { + + if (num_regions == 0) { + return 0; + } + uint so_far = 0; + uint idx_last_found = 0; + uint num_last_found; + while (so_far < num_regions && + (num_last_found = find_empty_in_range_reverse(start, end, &idx_last_found)) > 0) { + uint to_uncommit = MIN2(num_regions - so_far, num_last_found); + if(update_free_list) { + _free_list.remove_starting_at(at(idx_last_found + num_last_found - to_uncommit), to_uncommit); + } + uncommit_regions(idx_last_found + num_last_found - to_uncommit, to_uncommit); + so_far += to_uncommit; + end = idx_last_found; + } + return so_far; +} + +uint HeapRegionManagerForHeteroHeap::find_empty_in_range_reverse(uint start_idx, uint end_idx, uint* res_idx) { + guarantee(res_idx != NULL, "checking"); + guarantee(start_idx < max_length(), "checking"); + guarantee(end_idx < max_length(), "checking"); + if(start_idx > end_idx) { + return 0; + } + + uint num_regions_found = 0; + + jlong cur = end_idx; + while (cur >= start_idx && !(is_available(cur) && at(cur)->is_empty())) { + cur--; + } + if (cur == start_idx - 1) { + return num_regions_found; + } + jlong old_cur = cur; + // cur indexes the first empty region + while (cur >= start_idx && (is_available(cur) && at(cur)->is_empty())) { + cur--; + } + *res_idx = cur + 1; + num_regions_found = old_cur - cur; + +#ifdef ASSERT + for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) { + assert(at(i)->is_empty(), "just checking"); + } +#endif + return num_regions_found; +} + +HeapRegion* HeapRegionManagerForHeteroHeap::allocate_free_region(bool is_old) { + // old region is allocated from nv-dimm, non-old region from dram + // assumption: dram regions take higher indexes + assert(total_regions_committed() <= max_expandable_length(), "must be"); + bool from_head = is_old ? 
+HeapRegion* HeapRegionManagerForHeteroHeap::allocate_free_region(bool is_old) {
+  // Old regions are allocated from nv-dimm, non-old regions from dram.
+  // Assumption: dram regions take the higher indexes.
+  assert(total_regions_committed() <= max_expandable_length(), "must be");
+  bool from_head = is_old;
+  HeapRegion* hr = _free_list.remove_region(from_head);
+
+  if (hr != NULL && ((is_old && !is_in_nvdimm(hr->hrm_index())) || (!is_old && !is_in_dram(hr->hrm_index())))) {
+    _free_list.add_ordered(hr);
+    hr = NULL;
+  }
+
+#ifdef ASSERT
+  uint total_committed_before = total_regions_committed();
+#endif
+
+  if (hr == NULL) {
+    if (!is_old) {
+      uint ret = shrink_nvdimm(1);
+      if (ret == 1) {
+        ret = expand_dram(1, NULL);
+        assert(ret == 1, "We should be able to commit one region");
+        hr = _free_list.remove_region(from_head);
+      }
+    } else { // is_old
+      uint ret = shrink_dram(1);
+      if (ret == 1) {
+        ret = expand_nvdimm(1, NULL);
+        assert(ret == 1, "We should be able to commit one region");
+        hr = _free_list.remove_region(from_head);
+      }
+    }
+  }
+#ifdef ASSERT
+  assert(total_committed_before == total_regions_committed(), "invariant not met");
+  assert(total_regions_committed() <= max_expandable_length(), "post-condition");
+#endif
+
+  if (hr != NULL) {
+    assert(hr->next() == NULL, "Single region should not have next");
+    assert(is_available(hr->hrm_index()), "Must be committed");
+  }
+  assert(total_regions_committed() <= max_expandable_length(), "must be");
+  return hr;
+}
+
+uint HeapRegionManagerForHeteroHeap::find_contiguous_only_empty(size_t num) {
+  return find_contiguous(start_index_of_nvdimm(), end_index_of_nvdimm(), num, true);
+}
+
+uint HeapRegionManagerForHeteroHeap::find_contiguous_empty_or_unavailable(size_t num) {
+  return find_contiguous(start_index_of_nvdimm(), end_index_of_nvdimm(), num, false);
+}
+
+uint HeapRegionManagerForHeteroHeap::find_contiguous(size_t start, size_t end, size_t num, bool empty_only) {
+  uint found = 0;
+  size_t length_found = 0;
+  uint cur = (uint)start;
+  uint length_unavailable = 0;
+
+  while (length_found < num && cur <= end) {
+    HeapRegion* hr = _regions.get_by_index(cur);
+    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
+      // This region is a potential candidate for allocation into.
+      if (!is_available(cur)) {
+        length_unavailable++;
+      }
+      length_found++;
+    } else {
+      // This region is not a candidate. The next region is the next possible one.
+      found = cur + 1;
+      length_found = 0;
+    }
+    cur++;
+  }
+
+  if (length_found == num) {
+    for (uint i = found; i < (found + num); i++) {
+      HeapRegion* hr = _regions.get_by_index(i);
+      // sanity check
+      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
+                "Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
+                " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr));
+    }
+    if (!empty_only && length_unavailable > (max_expandable_length() - total_regions_committed())) {
+      // If 'length_unavailable' regions were made available, we would exceed the maximum number of regions.
+      return G1_NO_HRM_INDEX;
+    }
+    return found;
+  } else {
+    return G1_NO_HRM_INDEX;
+  }
+}
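find_contiguous() is a classic run-length scan: every non-candidate region resets the run, and the next run can only start one past it. A self-contained sketch of just that scan, with candidate regions modeled as bits and (uint)-1 standing in for G1_NO_HRM_INDEX (illustrative only):

    // Sketch of the contiguous-run search used for humongous allocation.
    #include <cassert>
    #include <vector>

    typedef unsigned int uint;

    // Returns the first index of a run of 'num' consecutive candidates,
    // or (uint)-1 if no such run exists in [start, end].
    static uint find_contiguous(const std::vector<bool>& candidate,
                                uint start, uint end, uint num) {
      uint found = start;
      uint length_found = 0;
      for (uint cur = start; cur <= end && length_found < num; cur++) {
        if (candidate[cur]) {
          length_found++;
        } else {
          found = cur + 1;   // run broken; the next run can start after 'cur'
          length_found = 0;
        }
      }
      return (length_found == num) ? found : (uint)-1;
    }

    int main() {
      //                         0     1      2     3     4     5
      std::vector<bool> cand = {true, false, true, true, true, false};
      assert(find_contiguous(cand, 0, 5, 3) == 2);         // run {2,3,4}
      assert(find_contiguous(cand, 0, 5, 4) == (uint)-1);  // no run of 4
      return 0;
    }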
+uint HeapRegionManagerForHeteroHeap::find_highest_free(bool* expanded) {
+  // Loop downwards from the highest dram region index, looking for an
+  // entry which is either free or not yet committed. If not yet
+  // committed, expand_at that index.
+  uint curr = end_index_of_dram();
+  while (true) {
+    HeapRegion* hr = _regions.get_by_index(curr);
+    if (hr == NULL && !(total_regions_committed() < _max_regions)) {
+      uint res = shrink_nvdimm(1);
+      if (res == 1) {
+        res = expand_in_range(curr, curr, 1, NULL);
+        assert(res == 1, "We should be able to expand since shrink was successful");
+        *expanded = true;
+        return curr;
+      }
+    } else if (hr != NULL && hr->is_free()) {
+      // Guard against hr being NULL before dereferencing it.
+      *expanded = false;
+      return curr;
+    }
+    if (curr == start_index_of_dram()) {
+      return G1_NO_HRM_INDEX;
+    }
+    curr--;
+  }
+}
+
+// We need to override this since region 0, which serves as the dummy region in the base class, may not be available here.
+// This is a corner case when the number of regions is small. When adaptive sizing is used, the initial heap size
+// could be just one region. That region is committed in dram to be used for the young generation, leaving region 0
+// (which is in nvdimm) unavailable.
+HeapRegion* HeapRegionManagerForHeteroHeap::get_dummy_region() {
+  uint curr = 0;
+
+  while (curr < _regions.length()) {
+    if (is_available(curr)) {
+      return new_heap_region(curr);
+    }
+    curr++;
+  }
+  assert(false, "We should always find a region available for dummy region");
+  return NULL;
+}
+
+// First shrink in dram, then in nv-dimm.
+uint HeapRegionManagerForHeteroHeap::shrink_by(uint num_regions) {
+  // This call is made at the end of a full collection. Before making this call the region sets are torn down
+  // (tear_down_region_sets()), so the shrink() calls below do not need to remove uncommitted regions from the free list.
+  uint ret = shrink_dram(num_regions, false /* update_free_list */);
+  ret += shrink_nvdimm(num_regions - ret, false /* update_free_list */);
+  return ret;
+}
+
+void HeapRegionManagerForHeteroHeap::verify() {
+  HeapRegionManager::verify();
+}
+
+uint HeapRegionManagerForHeteroHeap::free_list_dram_length() const {
+  return _free_list.num_of_regions_in_range(start_index_of_dram(), end_index_of_dram());
+}
+
+uint HeapRegionManagerForHeteroHeap::free_list_nvdimm_length() const {
+  return _free_list.num_of_regions_in_range(start_index_of_nvdimm(), end_index_of_nvdimm());
+}
+
+bool HeapRegionManagerForHeteroHeap::is_in_nvdimm(uint index) const {
+  return index >= start_index_of_nvdimm() && index <= end_index_of_nvdimm();
+}
+
+bool HeapRegionManagerForHeteroHeap::is_in_dram(uint index) const {
+  return index >= start_index_of_dram() && index <= end_index_of_dram();
+}
+
+// We have to make sure a full collection copies all surviving objects to NV-DIMM.
+// We might not have enough regions in nvdimm_set, so we need to make more regions on NV-DIMM available for the full collection.
+// Note: by doing this we are breaking the invariant that the total number of committed regions equals the current heap size.
+// After the full collection ends, we re-establish this invariant by freeing DRAM regions.
+void HeapRegionManagerForHeteroHeap::prepare_for_full_collection_start() {
+  _total_commited_before_full_gc = total_regions_committed();
+  expand_nvdimm(num_committed_dram(), NULL);
+  remove_all_free_regions();
+}
+
+// We need to bring the total committed regions back to what it was before the full collection started.
+// All regular (not pinned) regions in DRAM should be free at this point.
+// We shrink all free regions in DRAM and, if needed, from NV-DIMM (when there are pinned DRAM regions).
+void HeapRegionManagerForHeteroHeap::prepare_for_full_collection_end() {
+  uint shrink_size = total_regions_committed() - _total_commited_before_full_gc;
+  uint so_far = 0;
+  uint idx_last_found = 0;
+  uint num_last_found;
+  uint end = (uint)_regions.length() - 1;
+  while (so_far < shrink_size &&
+         (num_last_found = find_empty_in_range_reverse(0, end, &idx_last_found)) > 0) {
+    uint to_uncommit = MIN2(shrink_size - so_far, num_last_found);
+    uncommit_regions(idx_last_found + num_last_found - to_uncommit, to_uncommit);
+    so_far += to_uncommit;
+    end = idx_last_found;
+  }
+  assert(so_far == shrink_size, "We should be able to shrink this many regions");
+}
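Taken together, the two prepare_for_full_collection_*() calls bracket the only window in which the committed-regions invariant is allowed to break. A toy model of that bookkeeping, with plain counters standing in for the real region sets and commit machinery (a deliberately simplified sketch, not the real uncommit walk):

    // prepare_for_full_collection_start() deliberately over-commits NV-DIMM so
    // evacuation cannot run out of old regions; prepare_for_full_collection_end()
    // uncommits back down to the recorded total.
    #include <cassert>

    struct HeteroModel {
      unsigned dram;         // committed DRAM regions
      unsigned nvdimm;       // committed NV-DIMM regions
      unsigned saved_total;  // snapshot taken before the full GC

      unsigned total() const { return dram + nvdimm; }

      void prepare_for_full_collection_start() {
        saved_total = total();
        nvdimm += dram;      // worst case: every DRAM region evacuates to NV-DIMM
      }

      void prepare_for_full_collection_end() {
        unsigned shrink = total() - saved_total;
        // The real code walks empty regions from the top of the index space;
        // here we just drop the excess, DRAM first.
        unsigned from_dram = shrink < dram ? shrink : dram;
        dram -= from_dram;
        nvdimm -= (shrink - from_dram);
        assert(total() == saved_total);  // invariant restored
      }
    };

    int main() {
      HeteroModel h = {10, 30, 0};
      h.prepare_for_full_collection_start();
      assert(h.total() == 50);   // invariant intentionally broken
      h.prepare_for_full_collection_end();
      assert(h.total() == 40);   // and re-established
      return 0;
    }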
+uint HeapRegionManagerForHeteroHeap::start_index_of_dram() const { return _max_regions; }
+
+uint HeapRegionManagerForHeteroHeap::end_index_of_dram() const { return 2 * _max_regions - 1; }
+
+uint HeapRegionManagerForHeteroHeap::start_index_of_nvdimm() const { return 0; }
+
+uint HeapRegionManagerForHeteroHeap::end_index_of_nvdimm() const { return _max_regions - 1; }
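These four accessors encode the whole address-space layout: NV-DIMM owns indexes [0, _max_regions - 1] and DRAM owns [_max_regions, 2 * _max_regions - 1], so tier membership is pure index arithmetic. This also means the manager addresses twice the maximum heap size worth of region slots while committing at most _max_regions regions at a time, which is consistent with the max_reserved_capacity() changes elsewhere in this patch. A sketch of that index arithmetic (illustrative, not HotSpot code):

    #include <cassert>

    typedef unsigned int uint;

    struct IndexLayout {
      uint max_regions;  // region slots per tier; also the committed-total cap

      uint start_index_of_nvdimm() const { return 0; }
      uint end_index_of_nvdimm() const   { return max_regions - 1; }
      uint start_index_of_dram() const   { return max_regions; }
      uint end_index_of_dram() const     { return 2 * max_regions - 1; }

      bool is_in_nvdimm(uint idx) const { return idx <= end_index_of_nvdimm(); }
      bool is_in_dram(uint idx) const {
        return idx >= start_index_of_dram() && idx <= end_index_of_dram();
      }
    };

    int main() {
      IndexLayout layout = {128};
      assert(layout.is_in_nvdimm(0) && layout.is_in_nvdimm(127));
      assert(layout.is_in_dram(128) && layout.is_in_dram(255));
      assert(!layout.is_in_dram(64) && !layout.is_in_nvdimm(200));
      return 0;
    }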
--- /dev/null	2018-10-05 11:11:47.000000000 -0700
+++ new/src/hotspot/share/gc/g1/heapRegionManagerForHeteroHeap.hpp	2018-10-05 11:11:46.155318300 -0700
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_HEAPREGIONMANAGERFORHETEROHEAP_HPP
+#define SHARE_VM_GC_G1_HEAPREGIONMANAGERFORHETEROHEAP_HPP
+
+// This class manages heap regions on heterogeneous memory comprising dram and nv-dimm.
+// Regions in dram (dram_set) are used for young objects and archive regions (CDS).
+// Regions in nv-dimm (nvdimm_set) are used for old objects and humongous objects.
+// At any point there are some regions committed on dram and some on nv-dimm with the following guarantees:
+// 1. The total number of regions committed in dram and nv-dimm equals the current size of the heap.
+// 2. Consequently, the total number of regions committed is less than or equal to Xmx.
+// 3. To maintain guarantee 1, whenever one set grows (new regions committed), the other set shrinks (regions un-committed).
+//    3a. If more dram regions are needed (young generation expansion), a corresponding number of regions in nv-dimm is un-committed.
+//    3b. When the old generation or humongous set grows, and new regions need to be committed to nv-dimm, a corresponding number
+//        of regions is un-committed in dram.
+class HeapRegionManagerForHeteroHeap : public HeapRegionManager {
+
+  uint _max_regions;
+  uint _max_dram_regions;
+  uint _max_nvdimm_regions;
+  uint _total_commited_before_full_gc;
+  uint _start_index_of_nvdimm;
+
+  uint start_index_of_nvdimm() const;
+  uint start_index_of_dram() const;
+  uint end_index_of_nvdimm() const;
+  uint end_index_of_dram() const;
+
+  uint total_regions_committed() const;
+  uint num_committed_dram() const;
+  uint num_committed_nvdimm() const;
+
+  // Similar to find_unavailable_from_idx() in the base class; the difference is that this function searches in the range [start, end].
+  uint find_unavailable_in_range(uint start_idx, uint end_idx, uint* res_idx) const;
+
+  // Expand into dram. Maintains the invariant that the total number of committed regions is less than the current heap size.
+  uint expand_dram(uint num_regions, WorkGang* pretouch_workers);
+
+  // Expand into nv-dimm.
+  uint expand_nvdimm(uint num_regions, WorkGang* pretouch_workers);
+
+  // Expand by finding unavailable regions in the [start, end] range.
+  uint expand_in_range(uint start, uint end, uint num_regions, WorkGang* pretouch_workers);
+
+  // Shrink the dram set of regions.
+  uint shrink_dram(uint num_regions, bool update_free_list = true);
+
+  // Shrink the nv-dimm set of regions.
+  uint shrink_nvdimm(uint num_regions, bool update_free_list = true);
+
+  // Shrink regions from the [start, end] range.
+  uint shrink_in_range(uint start, uint end, uint num_regions, bool update_free_list = true);
+
+  // Similar to find_empty_from_idx_reverse() in the base class, except that it searches in a range.
+  uint find_empty_in_range_reverse(uint start_idx, uint end_idx, uint* res_idx);
+
+  // Similar to find_contiguous() in the base class, with a [start, end] range.
+  uint find_contiguous(size_t start, size_t end, size_t num, bool empty_only);
+
+  uint free_list_dram_length() const;
+  uint free_list_nvdimm_length() const;
+
+  // Is the region with the given index in nv-dimm?
+  bool is_in_nvdimm(uint index) const;
+  bool is_in_dram(uint index) const;
+
+public:
+
+  // Empty constructor; the manager is initialized with the initialize() method.
+  HeapRegionManagerForHeteroHeap(uint num_regions) : _max_regions(num_regions), _max_dram_regions(0),
+                                                     _max_nvdimm_regions(0), _total_commited_before_full_gc(0),
+                                                     _start_index_of_nvdimm(0)
+  {}
+
+  // Override.
+  HeapRegion* get_dummy_region();
+
+  // Resize dram_set to 'expected_num_regions'.
+  void resize_dram_regions(uint expected_num_regions, WorkGang* pretouch_workers);
+
+  // Should be called before starting a full collection.
+  void prepare_for_full_collection_start();
+  void prepare_for_full_collection_end();
+
+  virtual HeapRegion* allocate_free_region(bool is_old);
+
+  // Return the maximum number of regions that the heap can expand to.
+  uint max_expandable_length() const;
+
+  // Override. Expand in nv-dimm.
+  uint expand_by(uint num_regions, WorkGang* pretouch_workers);
+
+  // Override.
+  uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
+
+  // Override. This function is called for humongous allocation, so we need to find empty regions in nv-dimm.
+  uint find_contiguous_only_empty(size_t num);
+
+  // Override. This function is called for humongous allocation, so we need to find empty or unavailable regions in nv-dimm.
+  uint find_contiguous_empty_or_unavailable(size_t num);
+
+  // Overrides the base class implementation to find the highest free region in dram.
+  uint find_highest_free(bool* expanded);
+
+  // Override. This function is called to shrink the heap; we shrink in dram first, then in nv-dimm.
+  uint shrink_by(uint num_regions_to_remove);
+
+  void verify();
+};
+
+#endif // SHARE_VM_GC_G1_HEAPREGIONMANAGERFORHETEROHEAP_HPP
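Finally, a toy model of guarantee 3 from the class comment above: committing a region in one tier is always paired with un-committing one in the other (the allocate_free_region() fallback in the .cpp), so the committed total never changes. Counters stand in for the real commit/uncommit machinery; all names are illustrative:

    #include <cassert>

    struct TierModel {
      unsigned dram_committed;
      unsigned nvdimm_committed;
      unsigned max_regions;  // equals the current heap size in regions

      unsigned total() const { return dram_committed + nvdimm_committed; }

      // Mirrors the allocate_free_region() fallback: to grow DRAM (young gen),
      // give back one NV-DIMM region first, and vice versa for old regions.
      bool grow_dram() {
        if (nvdimm_committed == 0) return false;
        nvdimm_committed--; dram_committed++; return true;
      }
      bool grow_nvdimm() {
        if (dram_committed == 0) return false;
        dram_committed--; nvdimm_committed++; return true;
      }
    };

    int main() {
      TierModel m = {4, 12, 16};
      unsigned before = m.total();
      assert(m.grow_dram());        // young generation needs one more region
      assert(m.grow_nvdimm());      // an old region is needed later
      assert(m.total() == before);  // guarantee 1 holds throughout
      return 0;
    }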