src/share/vm/memory/genCollectedHeap.cpp
Index
Unified diffs
Context diffs
Sdiffs
Patch
New
Old
Previous File
Next File
8007074 Cdiff src/share/vm/memory/genCollectedHeap.cpp
src/share/vm/memory/genCollectedHeap.cpp
Print this page
*** 97,117 ****
// cases incorrectly returns the size in wordSize units rather than
// HeapWordSize).
guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
// The heap must be at least as aligned as generations.
! size_t alignment = Generation::GenGrain;
_gen_specs = gen_policy()->generations();
PermanentGenerationSpec *perm_gen_spec =
collector_policy()->permanent_generation();
// Make sure the sizes are all aligned.
for (i = 0; i < _n_gens; i++) {
! _gen_specs[i]->align(alignment);
}
! perm_gen_spec->align(alignment);
// If we are dumping the heap, then allocate a wasted block of address
// space in order to push the heap to a lower address. This extra
// address range allows for other (or larger) libraries to be loaded
// without them occupying the space required for the shared spaces.
--- 97,119 ----
// cases incorrectly returns the size in wordSize units rather than
// HeapWordSize).
guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
// The heap must be at least as aligned as generations.
! size_t gen_alignment = Generation::GenGrain;
_gen_specs = gen_policy()->generations();
PermanentGenerationSpec *perm_gen_spec =
collector_policy()->permanent_generation();
+ size_t heap_alignment = collector_policy()->max_alignment();
+
// Make sure the sizes are all aligned.
for (i = 0; i < _n_gens; i++) {
! _gen_specs[i]->align(gen_alignment);
}
! perm_gen_spec->align(heap_alignment);
// If we are dumping the heap, then allocate a wasted block of address
// space in order to push the heap to a lower address. This extra
// address range allows for other (or larger) libraries to be loaded
// without them occupying the space required for the shared spaces.
*** 128,150 ****
// Allocate space for the heap.
char* heap_address;
size_t total_reserved = 0;
int n_covered_regions = 0;
! ReservedSpace heap_rs(0);
! heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
&n_covered_regions, &heap_rs);
if (UseSharedSpaces) {
if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
if (heap_rs.is_reserved()) {
heap_rs.release();
}
FileMapInfo* mapinfo = FileMapInfo::current_info();
mapinfo->fail_continue("Unable to reserve shared region.");
! allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
&heap_rs);
}
}
if (!heap_rs.is_reserved()) {
--- 130,152 ----
// Allocate space for the heap.
char* heap_address;
size_t total_reserved = 0;
int n_covered_regions = 0;
! ReservedSpace heap_rs;
! heap_address = allocate(heap_alignment, perm_gen_spec, &total_reserved,
&n_covered_regions, &heap_rs);
if (UseSharedSpaces) {
if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
if (heap_rs.is_reserved()) {
heap_rs.release();
}
FileMapInfo* mapinfo = FileMapInfo::current_info();
mapinfo->fail_continue("Unable to reserve shared region.");
! allocate(heap_alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
&heap_rs);
}
}
if (!heap_rs.is_reserved()) {
*** 205,237 ****
size_t total_reserved = 0;
int n_covered_regions = 0;
const size_t pageSize = UseLargePages ?
os::large_page_size() : os::vm_page_size();
for (int i = 0; i < _n_gens; i++) {
total_reserved = add_and_check_overflow(total_reserved, _gen_specs[i]->max_size());
n_covered_regions += _gen_specs[i]->n_covered_regions();
}
! assert(total_reserved % pageSize == 0,
! err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
! SIZE_FORMAT, total_reserved, pageSize));
total_reserved = add_and_check_overflow(total_reserved, perm_gen_spec->max_size());
! assert(total_reserved % pageSize == 0,
! err_msg("Perm size; total_reserved=" SIZE_FORMAT ", pageSize="
SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved,
! pageSize, perm_gen_spec->max_size()));
n_covered_regions += perm_gen_spec->n_covered_regions();
// Add the size of the data area which shares the same reserved area
// as the heap, but which is not actually part of the heap.
size_t misc = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();
total_reserved = add_and_check_overflow(total_reserved, misc);
if (UseLargePages) {
assert(total_reserved != 0, "total_reserved cannot be 0");
total_reserved = round_up_and_check_overflow(total_reserved, os::large_page_size());
}
// Calculate the address at which the heap must reside in order for
// the shared data to be at the required address.
--- 207,243 ----
size_t total_reserved = 0;
int n_covered_regions = 0;
const size_t pageSize = UseLargePages ?
os::large_page_size() : os::vm_page_size();
+ assert(alignment % pageSize == 0, "Must be");
+
for (int i = 0; i < _n_gens; i++) {
total_reserved = add_and_check_overflow(total_reserved, _gen_specs[i]->max_size());
n_covered_regions += _gen_specs[i]->n_covered_regions();
}
! assert(total_reserved % alignment == 0,
! err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
! SIZE_FORMAT, total_reserved, alignment));
total_reserved = add_and_check_overflow(total_reserved, perm_gen_spec->max_size());
! assert(total_reserved % alignment == 0,
! err_msg("Perm size; total_reserved=" SIZE_FORMAT ", alignment="
SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved,
! alignment, perm_gen_spec->max_size()));
n_covered_regions += perm_gen_spec->n_covered_regions();
// Add the size of the data area which shares the same reserved area
// as the heap, but which is not actually part of the heap.
size_t misc = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();
total_reserved = add_and_check_overflow(total_reserved, misc);
if (UseLargePages) {
+ assert(misc == 0, "CDS does not support Large Pages");
assert(total_reserved != 0, "total_reserved cannot be 0");
+ assert(is_size_aligned(total_reserved, os::large_page_size()), "Must be");
total_reserved = round_up_and_check_overflow(total_reserved, os::large_page_size());
}
// Calculate the address at which the heap must reside in order for
// the shared data to be at the required address.
*** 248,274 ****
// Calculate the address of the first word of the heap.
heap_address -= total_reserved;
} else {
heap_address = NULL; // any address will do.
if (UseCompressedOops) {
! heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
*_total_reserved = total_reserved;
*_n_covered_regions = n_covered_regions;
*heap_rs = ReservedHeapSpace(total_reserved, alignment,
UseLargePages, heap_address);
if (heap_address != NULL && !heap_rs->is_reserved()) {
// Failed to reserve at specified address - the requested memory
// region is taken already, for example, by 'java' launcher.
// Try again to reserve the heap higher.
! heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
*heap_rs = ReservedHeapSpace(total_reserved, alignment,
UseLargePages, heap_address);
if (heap_address != NULL && !heap_rs->is_reserved()) {
// Failed to reserve at specified address again - give up.
! heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
assert(heap_address == NULL, "");
*heap_rs = ReservedHeapSpace(total_reserved, alignment,
UseLargePages, heap_address);
}
}
--- 254,280 ----
// Calculate the address of the first word of the heap.
heap_address -= total_reserved;
} else {
heap_address = NULL; // any address will do.
if (UseCompressedOops) {
! heap_address = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
*_total_reserved = total_reserved;
*_n_covered_regions = n_covered_regions;
*heap_rs = ReservedHeapSpace(total_reserved, alignment,
UseLargePages, heap_address);
if (heap_address != NULL && !heap_rs->is_reserved()) {
// Failed to reserve at specified address - the requested memory
// region is taken already, for example, by 'java' launcher.
// Try again to reserve the heap higher.
! heap_address = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);
*heap_rs = ReservedHeapSpace(total_reserved, alignment,
UseLargePages, heap_address);
if (heap_address != NULL && !heap_rs->is_reserved()) {
// Failed to reserve at specified address again - give up.
! heap_address = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
assert(heap_address == NULL, "");
*heap_rs = ReservedHeapSpace(total_reserved, alignment,
UseLargePages, heap_address);
}
}
src/share/vm/memory/genCollectedHeap.cpp
Index
Unified diffs
Context diffs
Sdiffs
Patch
New
Old
Previous File
Next File