--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	2014-08-11 13:45:55.039234805 +0200
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	2014-08-11 13:45:54.967232697 +0200
@@ -45,6 +45,7 @@
 #include "gc_implementation/g1/g1MarkSweep.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
+#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
@@ -381,6 +382,14 @@
   gclog_or_tty->cr();
 }
 
+void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
+  OtherRegionsTable::invalidate(start_idx, num_regions);
+}
+
+void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
+  reset_from_card_cache(start_idx, num_regions);
+}
+
 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) {
   // Claim the right to put the region on the dirty cards region list
@@ -760,13 +769,14 @@
   // to know in which list they are on so that we can remove them. We only
   // need to do this if we need to allocate more than one region to satisfy the
   // current humongous allocation request. If we are only allocating one region
-  // we use the one-region region allocation code (see above), or end up here.
+  // we use the one-region region allocation code (see above), which already
+  // potentially waits for regions from the secondary free list.
   wait_while_free_regions_coming();
   append_secondary_free_list_if_not_empty_with_lock();
 
   // Policy: Try only empty regions (i.e. already committed) first. Maybe we
   // are lucky enough to find some.
-  first = _hrs.find_contiguous(obj_regions, true);
+  first = _hrs.find_contiguous_only_empty(obj_regions);
   if (first != G1_NO_HRS_INDEX) {
     _hrs.allocate_free_regions_starting_at(first, obj_regions);
   }
@@ -776,7 +786,7 @@
   // Policy: We could not find enough regions for the humongous object in the
   // free list. Look through the heap to find a mix of free and uncommitted regions.
   // If we find such a mix, try expansion.
-  first = _hrs.find_contiguous(obj_regions, false);
+  first = _hrs.find_contiguous_empty_or_unavailable(obj_regions);
   if (first != G1_NO_HRS_INDEX) {
     // We found something. Make sure these regions are committed, i.e. expand
     // the heap. Alternatively we could do a defragmentation GC.
@@ -1954,8 +1964,6 @@
   _reserved.set_start((HeapWord*)heap_rs.base());
   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
-
   // Create the gen rem set (and barrier set) for the entire reserved region.
   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
   set_barrier_set(rem_set()->bs());
@@ -1970,14 +1978,65 @@
 
   // Carve out the G1 part of the heap.
   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
-  _hrs.initialize(g1_rs);
-
-  assert(_hrs.max_length() == _expansion_regions,
-         err_msg("max length: %u expansion regions: %u",
-                 _hrs.max_length(), _expansion_regions));
-
-  // Do later initialization work for concurrent refinement.
-  _cg1r->init();
+  G1RegionToSpaceMapper* heap_storage =
+    G1RegionToSpaceMapper::create_mapper(g1_rs,
+                                         UseLargePages ? os::large_page_size() : os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         1,
+                                         mtJavaHeap);
+  heap_storage->set_mapping_changed_listener(&_listener);
+
+  // Reserve space for the block offset table. We do not support automatic uncommit
+  // for the card table at this time, so this mapper covers the BOT only.
+  ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
+  G1RegionToSpaceMapper* bot_storage =
+    G1RegionToSpaceMapper::create_mapper(bot_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+
+  ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
+  G1RegionToSpaceMapper* cardtable_storage =
+    G1RegionToSpaceMapper::create_mapper(cardtable_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+  g1_barrier_set()->initialize(cardtable_storage);
+
+  // Reserve space for the card counts table.
+  ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
+  G1RegionToSpaceMapper* card_counts_storage =
+    G1RegionToSpaceMapper::create_mapper(card_counts_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         G1BlockOffsetSharedArray::N_bytes,
+                                         mtGC);
+
+  // Reserve space for the prev and next bitmaps.
+  size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
+
+  ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
+  G1RegionToSpaceMapper* prev_bitmap_storage =
+    G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         CMBitMap::mark_distance(),
+                                         mtGC);
+
+  ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
+  G1RegionToSpaceMapper* next_bitmap_storage =
+    G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
+                                         os::vm_page_size(),
+                                         HeapRegion::GrainBytes,
+                                         CMBitMap::mark_distance(),
+                                         mtGC);
+
+  _hrs.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
+
+  // Do later initialization work for concurrent refinement.
+  _cg1r->init(card_counts_storage);
 
   // 6843694 - ensure that the maximum region index can fit
   // in the remembered set structures.
@@ -1991,8 +2050,7 @@
 
   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
 
-  _bot_shared = new G1BlockOffsetSharedArray(_reserved,
-                                             heap_word_size(init_byte_size));
+  _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
 
   _g1h = this;
@@ -2001,7 +2059,7 @@
 
   // Create the ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
-  _cm = new ConcurrentMark(this, heap_rs);
+  _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
   if (_cm == NULL || !_cm->completed_initialization()) {
     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
     return JNI_ENOMEM;
@@ -2058,8 +2116,8 @@
 
   // Here we allocate the dummy HeapRegion that is required by the
   // G1AllocRegion class.
+  HeapRegion* dummy_region = _hrs.get_dummy_region();
+
   // We'll re-use the same region whether the alloc region will
   // require BOT updates or not and, if it doesn't, then a non-young
   // region will complain that it cannot support allocations without
@@ -2480,8 +2538,8 @@
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
-  if (_hrs.committed().contains(p)) {
-    // Given that we know that p is in the committed space,
+  if (_hrs.reserved().contains(p)) {
+    // Given that we know that p is in the reserved space,
     // heap_region_containing_raw() should successfully
     // return the containing region.
    HeapRegion* hr = heap_region_containing_raw(p);
@@ -2491,6 +2549,18 @@
   }
 }
 
+#ifdef ASSERT
+bool G1CollectedHeap::is_in_exact(const void* p) const {
+  bool contains = reserved_region().contains(p);
+  bool available = _hrs.is_available(addr_to_region((HeapWord*)p));
+  if (contains && available) {
+    return true;
+  } else {
+    return false;
+  }
+}
+#endif
+
 // Iteration functions.
 
 // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
@@ -3368,8 +3438,8 @@
   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used_unlocked()/K);
   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
-            _hrs.committed().start(),
-            _hrs.committed().end(),
+            _hrs.reserved().start(),
+            _hrs.reserved().start() + _hrs.length() * HeapRegion::GrainWords,
             _hrs.reserved().end());
   st->cr();
   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
@@ -4121,10 +4191,6 @@
   // RETIRE events are generated before the end GC event.
   _hr_printer.end_gc(false /* full */, (size_t) total_collections());
 
-  if (mark_in_progress()) {
-    concurrent_mark()->update_heap_boundaries(_hrs.committed());
-  }
-
 #ifdef TRACESPINNING
   ParallelTaskTerminator::print_termination_counts();
 #endif
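
For reference, a minimal, self-contained sketch of the mapper/listener wiring this patch introduces: heap_storage->set_mapping_changed_listener(&_listener) makes every region commit call back into on_commit(), which invalidates the from-card cache for exactly the newly committed regions. The class and method names mirror the patch, but the bodies are simplified stand-ins, not the real implementation: the real G1RegionToSpaceMapper commits pages of the backing ReservedSpace, and the real OtherRegionsTable::invalidate() clears remembered-set from-card-cache entries.

// Sketch only; commit_regions() here is a hypothetical stand-in for the
// real mapper's commit path.
#include <cstddef>
#include <cstdio>

typedef unsigned int uint;  // stands in for HotSpot's uint

// Listener interface: notified after the regions in
// [start_idx, start_idx + num_regions) have been committed.
class G1MappingChangedListener {
 public:
  virtual ~G1MappingChangedListener() {}
  virtual void on_commit(uint start_idx, size_t num_regions) = 0;
};

// Stand-in for OtherRegionsTable::invalidate(); the real method clears the
// from-card cache entries covering the given region range.
struct OtherRegionsTable {
  static void invalidate(uint start_idx, size_t num_regions) {
    std::printf("invalidate from-card cache for regions [%u, %u)\n",
                start_idx, (uint)(start_idx + num_regions));
  }
};

// Mirrors G1RegionMappingChangedListener from the hunk at line 382: a commit
// notification simply resets the from-card cache for the affected regions.
class G1RegionMappingChangedListener : public G1MappingChangedListener {
 private:
  void reset_from_card_cache(uint start_idx, size_t num_regions) {
    OtherRegionsTable::invalidate(start_idx, num_regions);
  }
 public:
  virtual void on_commit(uint start_idx, size_t num_regions) {
    reset_from_card_cache(start_idx, num_regions);
  }
};

// Greatly simplified mapper: the real class also maps page-sized commits to
// region indices and commits/uncommits the underlying memory.
class G1RegionToSpaceMapper {
 private:
  G1MappingChangedListener* _listener;
 public:
  G1RegionToSpaceMapper() : _listener(NULL) {}
  void set_mapping_changed_listener(G1MappingChangedListener* listener) {
    _listener = listener;
  }
  void commit_regions(uint start_idx, size_t num_regions) {
    // ... commit the backing pages here ...
    if (_listener != NULL) {
      _listener->on_commit(start_idx, num_regions);
    }
  }
};

int main() {
  G1RegionMappingChangedListener listener;
  G1RegionToSpaceMapper heap_storage;
  heap_storage.set_mapping_changed_listener(&listener);
  heap_storage.commit_regions(5, 3);  // e.g. the heap expands by three regions
  return 0;
}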