< prev index next >

src/hotspot/share/memory/virtualspace.cpp

Print this page

        

*** 64,79 **** bool large, char* requested_address) : _fd_for_heap(-1) { initialize(size, alignment, large, requested_address, false); } - ReservedSpace::ReservedSpace(size_t size, size_t alignment, - bool large, - bool executable) : _fd_for_heap(-1) { - initialize(size, alignment, large, NULL, executable); - } - ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, bool special, bool executable) : _fd_for_heap(-1) { assert((size % os::vm_allocation_granularity()) == 0, "size not allocation aligned"); _base = base; --- 64,73 ----
// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size, size_t rs_align,
                                     bool large) : ReservedSpace() {
  // Delegate to the shared ReservedSpace::initialize() path rather than a
  // dedicated executable constructor; passing executable=true requests the
  // mapping with execute permission.  No fixed address is requested (NULL).
  initialize(r_size, rs_align, large, /*requested address*/ NULL, /*executable*/ true);
  // Tag the reservation for Native Memory Tracking as code memory.
  // NOTE(review): base() is only meaningful after initialize() has run, so
  // these two statements must stay in this order.
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace
*** 1125,1135 **** assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned"); assert(is_aligned(size, alignment), "Must be at least aligned against alignment"); bool large = maybe_large && UseLargePages && size >= os::large_page_size(); ! ReservedSpace rs(size, alignment, large, false); assert(rs.base() != NULL, "Must be"); assert(rs.size() == size, "Must be"); if (rs.special()) { --- 1119,1129 ---- assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned"); assert(is_aligned(size, alignment), "Must be at least aligned against alignment"); bool large = maybe_large && UseLargePages && size >= os::large_page_size(); ! ReservedSpace rs(size, alignment, large); assert(rs.base() != NULL, "Must be"); assert(rs.size() == size, "Must be"); if (rs.special()) {
*** 1253,1263 **** return ReservedSpace(reserve_size_aligned); case Disable: case Commit: return ReservedSpace(reserve_size_aligned, os::vm_allocation_granularity(), ! /* large */ false, /* exec */ false); } } static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) { switch(mode) { --- 1247,1257 ---- return ReservedSpace(reserve_size_aligned); case Disable: case Commit: return ReservedSpace(reserve_size_aligned, os::vm_allocation_granularity(), ! /* large */ false); } } static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) { switch(mode) {
*** 1308,1318 **** return; } size_t large_page_size = os::large_page_size(); ! ReservedSpace reserved(large_page_size, large_page_size, true, false); assert(reserved.is_reserved(), "Must be"); VirtualSpace vs; bool initialized = vs.initialize(reserved, 0); --- 1302,1312 ---- return; } size_t large_page_size = os::large_page_size(); ! ReservedSpace reserved(large_page_size, large_page_size, true); assert(reserved.is_reserved(), "Must be"); VirtualSpace vs; bool initialized = vs.initialize(reserved, 0);
< prev index next >