
src/os/aix/vm/os_aix.cpp


*** 1934,1944 ****
    if (os::Aix::on_pase_V5R4_or_older()) {
      ShouldNotReachHere();
    }

    // Align size of shm up to 64K to avoid errors if we later try to change the page size.
!   const size_t size = align_size_up(bytes, 64*K);

    // Reserve the shared segment.
    int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
    if (shmid == -1) {
      trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
--- 1934,1944 ----
    if (os::Aix::on_pase_V5R4_or_older()) {
      ShouldNotReachHere();
    }

    // Align size of shm up to 64K to avoid errors if we later try to change the page size.
!   const size_t size = align_up(bytes, 64*K);

    // Reserve the shared segment.
    int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
    if (shmid == -1) {
      trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
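Reviewer note: for readers unfamiliar with this path, the pattern above (round the request up to 64K, then reserve a System V segment) can be sketched standalone as below. align_to_64k and reserve_shm_segment are illustrative names, not the HotSpot helpers; shmget, shmctl, IPC_PRIVATE and the mode bits are real POSIX APIs.

    #include <sys/ipc.h>
    #include <sys/shm.h>
    #include <sys/stat.h>
    #include <cstddef>
    #include <cstdio>

    // Illustrative stand-in for the HotSpot helper: round up to a 64K multiple.
    static size_t align_to_64k(size_t bytes) {
      const size_t k64 = 64 * 1024;
      return (bytes + k64 - 1) & ~(k64 - 1);
    }

    // Reserve a private System V shared memory segment of at least 'bytes'.
    static int reserve_shm_segment(size_t bytes) {
      const size_t size = align_to_64k(bytes);
      int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
      if (shmid == -1) {
        perror("shmget");
      }
      return shmid;
    }

    int main() {
      int id = reserve_shm_segment(100 * 1024);   // rounds up to 128K
      if (id != -1) {
        shmctl(id, IPC_RMID, NULL);               // clean up the reservation
      }
      return 0;
    }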
*** 2075,2085 ****
    } else {
      alignment_hint = os::vm_page_size();
    }

    // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
!   const size_t size = align_size_up(bytes, os::vm_page_size());

    // alignment: Allocate memory large enough to include an aligned range of the right size and
    // cut off the leading and trailing waste pages.
    assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
    const size_t extra_size = size + alignment_hint;
--- 2075,2085 ----
    } else {
      alignment_hint = os::vm_page_size();
    }

    // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
!   const size_t size = align_up(bytes, os::vm_page_size());

    // alignment: Allocate memory large enough to include an aligned range of the right size and
    // cut off the leading and trailing waste pages.
    assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
    const size_t extra_size = size + alignment_hint;
*** 2108,2118 ****
      trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
      return NULL;
    }

    // Handle alignment.
!   char* const addr_aligned = align_ptr_up(addr, alignment_hint);
    const size_t waste_pre = addr_aligned - addr;
    char* const addr_aligned_end = addr_aligned + size;
    const size_t waste_post = extra_size - waste_pre - size;
    if (waste_pre > 0) {
      ::munmap(addr, waste_pre);
--- 2108,2118 ----
      trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
      return NULL;
    }

    // Handle alignment.
!   char* const addr_aligned = align_up(addr, alignment_hint);
    const size_t waste_pre = addr_aligned - addr;
    char* const addr_aligned_end = addr_aligned + size;
    const size_t waste_post = extra_size - waste_pre - size;
    if (waste_pre > 0) {
      ::munmap(addr, waste_pre);
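The two hunks at 2075 and 2108 belong to one technique: over-allocate by alignment_hint, align the start pointer up, then munmap the leading and trailing waste. A minimal sketch, assuming the alignment is a power of two and a multiple of the page size (the original asserts this); ptr_align_up and reserve_aligned are illustrative names, and MAP_ANONYMOUS may be spelled MAP_ANON on AIX:

    #include <sys/mman.h>
    #include <cstdint>
    #include <cstddef>
    #include <cstdio>

    // Illustrative pointer round-up; 'alignment' must be a power of two.
    static char* ptr_align_up(char* p, size_t alignment) {
      const uintptr_t v = reinterpret_cast<uintptr_t>(p);
      return reinterpret_cast<char*>((v + alignment - 1) & ~(uintptr_t)(alignment - 1));
    }

    // Map 'size + alignment' bytes, then cut off leading/trailing waste so
    // the returned range starts on an 'alignment' boundary. 'alignment' is
    // assumed to be a multiple of the system page size, so both waste
    // ranges are page aligned as munmap requires.
    static char* reserve_aligned(size_t size, size_t alignment) {
      const size_t extra_size = size + alignment;
      char* addr = (char*) ::mmap(NULL, extra_size, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (addr == (char*) MAP_FAILED) {
        return NULL;
      }
      char* const addr_aligned = ptr_align_up(addr, alignment);
      const size_t waste_pre  = addr_aligned - addr;
      const size_t waste_post = extra_size - waste_pre - size;
      if (waste_pre > 0)  ::munmap(addr, waste_pre);               // cut leading waste
      if (waste_post > 0) ::munmap(addr_aligned + size, waste_post); // cut trailing waste
      return addr_aligned;
    }

    int main() {
      char* p = reserve_aligned(64 * 1024, 64 * 1024);
      printf("aligned mapping at %p\n", (void*)p);
      if (p != NULL) ::munmap(p, 64 * 1024);
      return 0;
    }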
*** 2334,2346 ****
    // Instead of mimicking the dangerous coding of the other platforms, here I
    // just ignore the request address (release) or assert(debug).
    assert0(requested_addr == NULL);

    // Always round to os::vm_page_size(), which may be larger than 4K.
!   bytes = align_size_up(bytes, os::vm_page_size());
    const size_t alignment_hint0 =
!     alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;

    // In 4K mode always use mmap.
    // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
    if (os::vm_page_size() == 4*K) {
      return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
--- 2334,2346 ----
    // Instead of mimicking the dangerous coding of the other platforms, here I
    // just ignore the request address (release) or assert(debug).
    assert0(requested_addr == NULL);

    // Always round to os::vm_page_size(), which may be larger than 4K.
!   bytes = align_up(bytes, os::vm_page_size());
    const size_t alignment_hint0 =
!     alignment_hint ? align_up(alignment_hint, os::vm_page_size()) : 0;

    // In 4K mode always use mmap.
    // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
    if (os::vm_page_size() == 4*K) {
      return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
*** 2358,2369 ****
    // Dynamically do different things for mmap/shmat.
    vmembk_t* const vmi = vmembk_find(addr);
    guarantee0(vmi);

    // Always round to os::vm_page_size(), which may be larger than 4K.
!   size = align_size_up(size, os::vm_page_size());
!   addr = align_ptr_up(addr, os::vm_page_size());

    bool rc = false;
    bool remove_bookkeeping = false;
    if (vmi->type == VMEM_SHMATED) {
      // For shmatted memory, we do:
--- 2358,2369 ----
    // Dynamically do different things for mmap/shmat.
    vmembk_t* const vmi = vmembk_find(addr);
    guarantee0(vmi);

    // Always round to os::vm_page_size(), which may be larger than 4K.
!   size = align_up(size, os::vm_page_size());
!   addr = align_up(addr, os::vm_page_size());

    bool rc = false;
    bool remove_bookkeeping = false;
    if (vmi->type == VMEM_SHMATED) {
      // For shmatted memory, we do:
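Both the size and the address are rounded in this hunk because munmap/shmdt work at page granularity, and the AIX VM page may be 64K rather than 4K. A tiny arithmetic sketch with hypothetical values; round_up is an illustrative stand-in for align_up:

    #include <cstdio>
    #include <cstdint>

    // Illustrative round-up; 'a' must be a power of two.
    static uintptr_t round_up(uintptr_t v, uintptr_t a) {
      return (v + a - 1) & ~(a - 1);
    }

    int main() {
      const uintptr_t page = 64 * 1024;                // assume 64K VM pages
      uintptr_t size = round_up(100 * 1024, page);     // 100K -> 128K
      uintptr_t addr = round_up(0x10001000, page);     // -> 0x10010000, page aligned
      printf("size=%lu addr=0x%lx\n", (unsigned long)size, (unsigned long)addr);
      return 0;
    }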
*** 2525,2535 ****
  // available (and not reserved for something else).
  char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
    char* addr = NULL;

    // Always round to os::vm_page_size(), which may be larger than 4K.
!   bytes = align_size_up(bytes, os::vm_page_size());

    // In 4K mode always use mmap.
    // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
    if (os::vm_page_size() == 4*K) {
      return reserve_mmaped_memory(bytes, requested_addr, 0);
--- 2525,2535 ----
  // available (and not reserved for something else).
  char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
    char* addr = NULL;

    // Always round to os::vm_page_size(), which may be larger than 4K.
!   bytes = align_up(bytes, os::vm_page_size());

    // In 4K mode always use mmap.
    // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
    if (os::vm_page_size() == 4*K) {
      return reserve_mmaped_memory(bytes, requested_addr, 0);
*** 4310,4320 ****
    // Align the returned stack size such that the stack low address
    // is aligned to page size (Note: base is usually not and we do not care).
    // We need to do this because caller code will assume stack low address is
    // page aligned and will place guard pages without checking.
    address low = bounds.base - bounds.size;
!   address low_aligned = (address)align_ptr_up(low, os::vm_page_size());
    size_t s = bounds.base - low_aligned;
    return s;
  }

  extern char** environ;
--- 4310,4320 ----
    // Align the returned stack size such that the stack low address
    // is aligned to page size (Note: base is usually not and we do not care).
    // We need to do this because caller code will assume stack low address is
    // page aligned and will place guard pages without checking.
    address low = bounds.base - bounds.size;
!   address low_aligned = (address)align_up(low, os::vm_page_size());
    size_t s = bounds.base - low_aligned;
    return s;
  }

  extern char** environ;
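The intent of this hunk: report a stack size whose implied low address is page aligned, since callers place guard pages there without re-checking. A worked sketch with hypothetical base/size values:

    #include <cstdio>
    #include <cstdint>

    int main() {
      const uintptr_t page = 4096;          // hypothetical page size
      uintptr_t base = 0x2FF23000;          // hypothetical stack base (high end)
      uintptr_t size = 0x10100;             // raw size, not a page multiple
      uintptr_t low  = base - size;                           // 0x2FF12F00
      uintptr_t low_aligned = (low + page - 1) & ~(page - 1); // 0x2FF13000
      uintptr_t trimmed = base - low_aligned;                 // 0x10000, reported size
      printf("low=0x%lx low_aligned=0x%lx trimmed=0x%lx\n",
             (unsigned long)low, (unsigned long)low_aligned, (unsigned long)trimmed);
      return 0;
    }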