< prev index next >

src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

Print this page
rev 7746 : 8058354: SPECjvm2008-Derby -2.7% performance regression on Solaris-X64 starting with 9-b29
Summary: Allow partial use of large pages for auxiliary data structures in G1.
Reviewed-by:

*** 1,7 **** /* ! * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. --- 1,7 ---- /* ! * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation.
*** 1830,1839 **** --- 1830,1866 ----
    NOT_PRODUCT(reset_evacuation_should_fail();)
    guarantee(_task_queues != NULL, "task_queues allocation failure.");
  }
  
+ // Create a G1RegionToSpaceMapper for one of G1's auxiliary data structures
+ // (BOT, card table, card counts table, marking bitmaps), preferring large
+ // pages when the structure is big enough to use them.
+ G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
+                                                                  size_t size,
+                                                                  size_t translation_factor) {
+   // Determine the preferred page size for the auxiliary data structures. We always
+   // prefer large pages if the given size allows it for performance reasons.
+   size_t const commit_size = os::page_size_for_region_unaligned(size, 1);
+   // The base address of the reserved space must be aligned to that page size.
+   // Otherwise we would need to split pages (or it would be completely impossible)
+   // when uncommitting memory within the heap.
+   // Size need *not* be aligned to the above calculated commit size.
+   size_t const alignment = MAX2(commit_size, (size_t)os::vm_allocation_granularity());
+   bool const use_large_pages = commit_size != (size_t)os::vm_page_size() ? UseLargePages : false;
+   ReservedSpace rs(align_size_up(size, alignment), alignment, use_large_pages);
+   G1RegionToSpaceMapper* result =
+     G1RegionToSpaceMapper::create_mapper(rs,
+                                          size,
+                                          commit_size,
+                                          HeapRegion::GrainBytes,
+                                          translation_factor,
+                                          mtGC);
+   if (TracePageSizes) {
+     // Note the space after each macro: "SIZE_FORMAT" base="" would be lexed as a
+     // user-defined literal suffix under C++11 and fail to compile.
+     gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
+                            description, commit_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
+   }
+   return result;
+ }
+ 
  jint G1CollectedHeap::initialize() {
    CollectedHeap::pre_initialize();
    os::enable_vtime();
  
    G1Log::init();
*** 1897,1957 ****
    // Carve out the G1 part of the heap.
  
    ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
    G1RegionToSpaceMapper* heap_storage =
      G1RegionToSpaceMapper::create_mapper(g1_rs,
                                           UseLargePages ? os::large_page_size() : os::vm_page_size(),
                                           HeapRegion::GrainBytes,
                                           1,
                                           mtJavaHeap);
    heap_storage->set_mapping_changed_listener(&_listener);
  
!   // Reserve space for the block offset table. We do not support automatic uncommit
!   // for the card table at this time. BOT only.
!   ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
    G1RegionToSpaceMapper* bot_storage =
!     G1RegionToSpaceMapper::create_mapper(bot_rs,
!                                          os::vm_page_size(),
!                                          HeapRegion::GrainBytes,
!                                          G1BlockOffsetSharedArray::N_bytes,
!                                          mtGC);
  
-   ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
    G1RegionToSpaceMapper* cardtable_storage =
!     G1RegionToSpaceMapper::create_mapper(cardtable_rs,
!                                          os::vm_page_size(),
!                                          HeapRegion::GrainBytes,
!                                          G1BlockOffsetSharedArray::N_bytes,
!                                          mtGC);
  
-   // Reserve space for the card counts table.
-   ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
    G1RegionToSpaceMapper* card_counts_storage =
!     G1RegionToSpaceMapper::create_mapper(card_counts_rs,
!                                          os::vm_page_size(),
!                                          HeapRegion::GrainBytes,
!                                          G1BlockOffsetSharedArray::N_bytes,
!                                          mtGC);
  
-   // Reserve space for prev and next bitmap.
    size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
- 
-   ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
    G1RegionToSpaceMapper* prev_bitmap_storage =
!     G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
!                                          os::vm_page_size(),
!                                          HeapRegion::GrainBytes,
!                                          CMBitMap::mark_distance(),
!                                          mtGC);
! 
!   ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
    G1RegionToSpaceMapper* next_bitmap_storage =
!     G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
!                                          os::vm_page_size(),
!                                          HeapRegion::GrainBytes,
!                                          CMBitMap::mark_distance(),
!                                          mtGC);
  
    _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
    g1_barrier_set()->initialize(cardtable_storage);
  
    // Do later initialization work for concurrent refinement.
    _cg1r->init(card_counts_storage);
--- 1924,1961 ----
    // Carve out the G1 part of the heap.
  
    ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
    G1RegionToSpaceMapper* heap_storage =
      G1RegionToSpaceMapper::create_mapper(g1_rs,
+                                          g1_rs.size(),
                                           UseLargePages ? os::large_page_size() : os::vm_page_size(),
                                           HeapRegion::GrainBytes,
                                           1,
                                           mtJavaHeap);
    heap_storage->set_mapping_changed_listener(&_listener);
  
!   // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
!   // create_aux_memory_mapper() reserves its own space, so the old per-structure
!   // ReservedSpace declarations (including cardtable_rs, which the previous revision
!   // of this patch left behind unused) are gone.
    G1RegionToSpaceMapper* bot_storage =
!     create_aux_memory_mapper("Block offset table",
!                              G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
!                              G1BlockOffsetSharedArray::N_bytes);
  
    G1RegionToSpaceMapper* cardtable_storage =
!     create_aux_memory_mapper("Card table",
!                              G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
!                              G1BlockOffsetSharedArray::N_bytes);
  
    G1RegionToSpaceMapper* card_counts_storage =
!     create_aux_memory_mapper("Card counts table",
!                              G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
!                              G1BlockOffsetSharedArray::N_bytes);
  
    size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
    G1RegionToSpaceMapper* prev_bitmap_storage =
!     create_aux_memory_mapper("Prev Bitmap", bitmap_size, CMBitMap::mark_distance());
    G1RegionToSpaceMapper* next_bitmap_storage =
!     create_aux_memory_mapper("Next Bitmap", bitmap_size, CMBitMap::mark_distance());
  
    _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
    g1_barrier_set()->initialize(cardtable_storage);
  
    // Do later initialization work for concurrent refinement.
    _cg1r->init(card_counts_storage);
< prev index next >