src/os/linux/vm/os_linux.cpp

*** 576,586 ****
    size_t size;
    volatile char *p;

    // Adjust bottom to point to the largest address within the same page, it
    // gives us a one-page buffer if alloca() allocates slightly more memory.
!   bottom = (address)align_size_down((uintptr_t)bottom, os::Linux::page_size());
    bottom += os::Linux::page_size() - 1;

    // sp might be slightly above current stack pointer; if that's the case, we
    // will alloca() a little more space than necessary, which is OK. Don't use
    // os::current_stack_pointer(), as its result can be slightly below current
--- 576,586 ----
    size_t size;
    volatile char *p;

    // Adjust bottom to point to the largest address within the same page, it
    // gives us a one-page buffer if alloca() allocates slightly more memory.
!   bottom = (address)align_down((uintptr_t)bottom, os::Linux::page_size());
    bottom += os::Linux::page_size() - 1;

    // sp might be slightly above current stack pointer; if that's the case, we
    // will alloca() a little more space than necessary, which is OK. Don't use
    // os::current_stack_pointer(), as its result can be slightly below current
*** 713,723 ****
    // of zero due to overflow. Don't add the guard page in that case.
    size_t guard_size = os::Linux::default_guard_size(thr_type);
    if (stack_size <= SIZE_MAX - guard_size) {
      stack_size += guard_size;
    }
!   assert(is_size_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");

    int status = pthread_attr_setstacksize(&attr, stack_size);
    assert_status(status == 0, status, "pthread_attr_setstacksize");

    // Configure glibc guard page.
--- 713,723 ----
    // of zero due to overflow. Don't add the guard page in that case.
    size_t guard_size = os::Linux::default_guard_size(thr_type);
    if (stack_size <= SIZE_MAX - guard_size) {
      stack_size += guard_size;
    }
!   assert(is_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");

    int status = pthread_attr_setstacksize(&attr, stack_size);
    assert_status(status == 0, status, "pthread_attr_setstacksize");

    // Configure glibc guard page.
*** 1099,1119 ****
      stack_top = stack_start;
      stack_size -= 16 * page_size();
    }

    // stack_top could be partially down the page so align it
!   stack_top = align_size_up(stack_top, page_size());

    // Allowed stack value is minimum of max_size and what we derived from rlimit
    if (max_size > 0) {
      _initial_thread_stack_size = MIN2(max_size, stack_size);
    } else {
      // Accept the rlimit max, but if stack is unlimited then it will be huge, so
      // clamp it at 8MB as we do on Solaris
      _initial_thread_stack_size = MIN2(stack_size, 8*M);
    }
!   _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size());
    _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;

    assert(_initial_thread_stack_bottom < (address)stack_top, "overflow!");

    if (log_is_enabled(Info, os, thread)) {
--- 1099,1119 ----
      stack_top = stack_start;
      stack_size -= 16 * page_size();
    }

    // stack_top could be partially down the page so align it
!   stack_top = align_up(stack_top, page_size());

    // Allowed stack value is minimum of max_size and what we derived from rlimit
    if (max_size > 0) {
      _initial_thread_stack_size = MIN2(max_size, stack_size);
    } else {
      // Accept the rlimit max, but if stack is unlimited then it will be huge, so
      // clamp it at 8MB as we do on Solaris
      _initial_thread_stack_size = MIN2(stack_size, 8*M);
    }
!   _initial_thread_stack_size = align_down(_initial_thread_stack_size, page_size());
    _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;

    assert(_initial_thread_stack_bottom < (address)stack_top, "overflow!");

    if (log_is_enabled(Info, os, thread)) {
*** 3168,3178 ****
        if (start != req_addr) {
          ::munmap(start, extra_size);
          start = NULL;
        }
      } else {
!       char* const start_aligned = align_ptr_up(start, alignment);
        char* const end_aligned = start_aligned + bytes;
        char* const end = start + extra_size;
        if (start_aligned > start) {
          ::munmap(start, start_aligned - start);
        }
--- 3168,3178 ----
        if (start != req_addr) {
          ::munmap(start, extra_size);
          start = NULL;
        }
      } else {
!       char* const start_aligned = align_up(start, alignment);
        char* const end_aligned = start_aligned + bytes;
        char* const end = start + extra_size;
        if (start_aligned > start) {
          ::munmap(start, start_aligned - start);
        }
*** 3198,3217 ****
    return anon_munmap(addr, size);
  }

  static bool linux_mprotect(char* addr, size_t size, int prot) {
    // Linux wants the mprotect address argument to be page aligned.
!   char* bottom = (char*)align_size_down((intptr_t)addr, os::Linux::page_size());

    // According to SUSv3, mprotect() should only be used with mappings
    // established by mmap(), and mmap() always maps whole pages. Unaligned
    // 'addr' likely indicates problem in the VM (e.g. trying to change
    // protection of malloc'ed or statically allocated memory). Check the
    // caller if you hit this assert.
    assert(addr == bottom, "sanity check");

!   size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
    return ::mprotect(bottom, size, prot) == 0;
  }

  // Set protections specified
  bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
--- 3198,3217 ----
    return anon_munmap(addr, size);
  }

  static bool linux_mprotect(char* addr, size_t size, int prot) {
    // Linux wants the mprotect address argument to be page aligned.
!   char* bottom = (char*)align_down((intptr_t)addr, os::Linux::page_size());

    // According to SUSv3, mprotect() should only be used with mappings
    // established by mmap(), and mmap() always maps whole pages. Unaligned
    // 'addr' likely indicates problem in the VM (e.g. trying to change
    // protection of malloc'ed or statically allocated memory). Check the
    // caller if you hit this assert.
    assert(addr == bottom, "sanity check");

!   size = align_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
    return ::mprotect(bottom, size, prot) == 0;
  }

  // Set protections specified
  bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
*** 3242,3252 ****
    bool result = false;
    void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
                   MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    if (p != MAP_FAILED) {
!     void *aligned_p = align_ptr_up(p, page_size);

      result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;

      munmap(p, page_size * 2);
    }

--- 3242,3252 ----
    bool result = false;
    void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
                   MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
    if (p != MAP_FAILED) {
!     void *aligned_p = align_up(p, page_size);

      result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;

      munmap(p, page_size * 2);
    }
*** 3485,3497 ****
      int err = errno; \
      shm_warning_format(str " (error = %d)", err); \
    } while (0)

  static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {
!   assert(is_size_aligned(bytes, alignment), "Must be divisible by the alignment");

!   if (!is_size_aligned(alignment, SHMLBA)) {
      assert(false, "Code below assumes that alignment is at least SHMLBA aligned");
      return NULL;
    }

    // To ensure that we get 'alignment' aligned memory from shmat,
--- 3485,3497 ----
      int err = errno; \
      shm_warning_format(str " (error = %d)", err); \
    } while (0)

  static char* shmat_with_alignment(int shmid, size_t bytes, size_t alignment) {
!   assert(is_aligned(bytes, alignment), "Must be divisible by the alignment");

!   if (!is_aligned(alignment, SHMLBA)) {
      assert(false, "Code below assumes that alignment is at least SHMLBA aligned");
      return NULL;
    }

    // To ensure that we get 'alignment' aligned memory from shmat,
*** 3523,3533 ****
    return addr;
  }

  static char* shmat_at_address(int shmid, char* req_addr) {
!   if (!is_ptr_aligned(req_addr, SHMLBA)) {
      assert(false, "Requested address needs to be SHMLBA aligned");
      return NULL;
    }

    char* addr = (char*)shmat(shmid, req_addr, 0);

--- 3523,3533 ----
    return addr;
  }

  static char* shmat_at_address(int shmid, char* req_addr) {
!   if (!is_aligned(req_addr, SHMLBA)) {
      assert(false, "Requested address needs to be SHMLBA aligned");
      return NULL;
    }

    char* addr = (char*)shmat(shmid, req_addr, 0);
*** 3541,3561 ****
  }

  static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char* req_addr) {
    // If a req_addr has been provided, we assume that the caller has already aligned the address.
    if (req_addr != NULL) {
!     assert(is_ptr_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
!     assert(is_ptr_aligned(req_addr, alignment), "Must be divisible by given alignment");
      return shmat_at_address(shmid, req_addr);
    }

    // Since shmid has been setup with SHM_HUGETLB, shmat will automatically
    // return large page size aligned memory addresses when req_addr == NULL.
    // However, if the alignment is larger than the large page size, we have
    // to manually ensure that the memory returned is 'alignment' aligned.
    if (alignment > os::large_page_size()) {
!     assert(is_size_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
      return shmat_with_alignment(shmid, bytes, alignment);
    } else {
      return shmat_at_address(shmid, NULL);
    }
  }
--- 3541,3561 ----
  }

  static char* shmat_large_pages(int shmid, size_t bytes, size_t alignment, char* req_addr) {
    // If a req_addr has been provided, we assume that the caller has already aligned the address.
    if (req_addr != NULL) {
!     assert(is_aligned(req_addr, os::large_page_size()), "Must be divisible by the large page size");
!     assert(is_aligned(req_addr, alignment), "Must be divisible by given alignment");
      return shmat_at_address(shmid, req_addr);
    }

    // Since shmid has been setup with SHM_HUGETLB, shmat will automatically
    // return large page size aligned memory addresses when req_addr == NULL.
    // However, if the alignment is larger than the large page size, we have
    // to manually ensure that the memory returned is 'alignment' aligned.
    if (alignment > os::large_page_size()) {
!     assert(is_aligned(alignment, os::large_page_size()), "Must be divisible by the large page size");
      return shmat_with_alignment(shmid, bytes, alignment);
    } else {
      return shmat_at_address(shmid, NULL);
    }
  }
*** 3563,3576 ****
  char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
                                              char* req_addr, bool exec) {
    // "exec" is passed in but not used. Creating the shared image for
    // the code cache doesn't have an SHM_X executable permission to check.
    assert(UseLargePages && UseSHM, "only for SHM large pages");
!   assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
!   assert(is_ptr_aligned(req_addr, alignment), "Unaligned address");

!   if (!is_size_aligned(bytes, os::large_page_size())) {
      return NULL; // Fallback to small pages.
    }

    // Create a large shared memory region to attach to based on size.
    // Currently, size is the total size of the heap.
--- 3563,3576 ----
  char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment,
                                              char* req_addr, bool exec) {
    // "exec" is passed in but not used. Creating the shared image for
    // the code cache doesn't have an SHM_X executable permission to check.
    assert(UseLargePages && UseSHM, "only for SHM large pages");
!   assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address");
!   assert(is_aligned(req_addr, alignment), "Unaligned address");

!   if (!is_aligned(bytes, os::large_page_size())) {
      return NULL; // Fallback to small pages.
    }

    // Create a large shared memory region to attach to based on size.
    // Currently, size is the total size of the heap.
*** 3625,3636 ****
  char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes,
                                                          char* req_addr,
                                                          bool exec) {
    assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
!   assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
!   assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");

    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
    char* addr = (char*)::mmap(req_addr, bytes, prot,
                               MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB,
                               -1, 0);

--- 3625,3636 ----
  char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes,
                                                          char* req_addr,
                                                          bool exec) {
    assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
!   assert(is_aligned(bytes, os::large_page_size()), "Unaligned size");
!   assert(is_aligned(req_addr, os::large_page_size()), "Unaligned address");

    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
    char* addr = (char*)::mmap(req_addr, bytes, prot,
                               MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB,
                               -1, 0);
*** 3638,3648 ****
    if (addr == MAP_FAILED) {
      warn_on_large_pages_failure(req_addr, bytes, errno);
      return NULL;
    }

!   assert(is_ptr_aligned(addr, os::large_page_size()), "Must be");

    return addr;
  }

  // Reserve memory using mmap(MAP_HUGETLB).
--- 3638,3648 ----
    if (addr == MAP_FAILED) {
      warn_on_large_pages_failure(req_addr, bytes, errno);
      return NULL;
    }

!   assert(is_aligned(addr, os::large_page_size()), "Must be");

    return addr;
  }

  // Reserve memory using mmap(MAP_HUGETLB).
*** 3657,3687 ****
                                                           char* req_addr,
                                                           bool exec) {
    size_t large_page_size = os::large_page_size();
    assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");

!   assert(is_ptr_aligned(req_addr, alignment), "Must be");
!   assert(is_size_aligned(bytes, alignment), "Must be");

    // First reserve - but not commit - the address range in small pages.
    char* const start = anon_mmap_aligned(bytes, alignment, req_addr);

    if (start == NULL) {
      return NULL;
    }

!   assert(is_ptr_aligned(start, alignment), "Must be");

    char* end = start + bytes;

    // Find the regions of the allocated chunk that can be promoted to large pages.
!   char* lp_start = align_ptr_up(start, large_page_size);
!   char* lp_end = align_ptr_down(end, large_page_size);

    size_t lp_bytes = lp_end - lp_start;

!   assert(is_size_aligned(lp_bytes, large_page_size), "Must be");

    if (lp_bytes == 0) {
      // The mapped region doesn't even span the start and the end of a large page.
      // Fall back to allocate a non-special area.
      ::munmap(start, end - start);
--- 3657,3687 ----
                                                           char* req_addr,
                                                           bool exec) {
    size_t large_page_size = os::large_page_size();
    assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");

!   assert(is_aligned(req_addr, alignment), "Must be");
!   assert(is_aligned(bytes, alignment), "Must be");

    // First reserve - but not commit - the address range in small pages.
    char* const start = anon_mmap_aligned(bytes, alignment, req_addr);

    if (start == NULL) {
      return NULL;
    }

!   assert(is_aligned(start, alignment), "Must be");

    char* end = start + bytes;

    // Find the regions of the allocated chunk that can be promoted to large pages.
!   char* lp_start = align_up(start, large_page_size);
!   char* lp_end = align_down(end, large_page_size);

    size_t lp_bytes = lp_end - lp_start;

!   assert(is_aligned(lp_bytes, large_page_size), "Must be");

    if (lp_bytes == 0) {
      // The mapped region doesn't even span the start and the end of a large page.
      // Fall back to allocate a non-special area.
      ::munmap(start, end - start);
*** 3738,3753 ****
  char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes,
                                                     size_t alignment,
                                                     char* req_addr,
                                                     bool exec) {
    assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
!   assert(is_ptr_aligned(req_addr, alignment), "Must be");
!   assert(is_size_aligned(alignment, os::vm_allocation_granularity()), "Must be");
    assert(is_power_of_2(os::large_page_size()), "Must be");
    assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");

!   if (is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
      return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
    } else {
      return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
    }
  }
--- 3738,3753 ----
  char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes,
                                                     size_t alignment,
                                                     char* req_addr,
                                                     bool exec) {
    assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
!   assert(is_aligned(req_addr, alignment), "Must be");
!   assert(is_aligned(alignment, os::vm_allocation_granularity()), "Must be");
    assert(is_power_of_2(os::large_page_size()), "Must be");
    assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");

!   if (is_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
      return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
    } else {
      return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
    }
  }
*** 5965,5980 ****
    test_log("%s, req_addr NULL:", __FUNCTION__);
    test_log("size align result");

    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
!     for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " -> " PTR_FORMAT " %s",
                 size, alignment, p2i(p), (p != NULL ? "" : "(failed)"));
        if (p != NULL) {
!         assert(is_ptr_aligned(p, alignment), "must be");
          small_page_write(p, size);
          os::Linux::release_memory_special_huge_tlbfs(p, size);
        }
      }
    }
--- 5965,5980 ----
    test_log("%s, req_addr NULL:", __FUNCTION__);
    test_log("size align result");

    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
!     for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " -> " PTR_FORMAT " %s",
                 size, alignment, p2i(p), (p != NULL ? "" : "(failed)"));
        if (p != NULL) {
!         assert(is_aligned(p, alignment), "must be");
          small_page_write(p, size);
          os::Linux::release_memory_special_huge_tlbfs(p, size);
        }
      }
    }
*** 5983,5994 ****
    test_log("%s, req_addr non-NULL:", __FUNCTION__);
    test_log("size align req_addr result");

    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
!     for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
!       char* const req_addr = align_ptr_up(mapping1, alignment);
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " -> " PTR_FORMAT " %s",
                 size, alignment, p2i(req_addr), p2i(p),
                 ((p != NULL ? (p == req_addr ? "(exact match)" : "") : "(failed)")));
        if (p != NULL) {
--- 5983,5994 ----
    test_log("%s, req_addr non-NULL:", __FUNCTION__);
    test_log("size align req_addr result");

    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
!     for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
!       char* const req_addr = align_up(mapping1, alignment);
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " -> " PTR_FORMAT " %s",
                 size, alignment, p2i(req_addr), p2i(p),
                 ((p != NULL ? (p == req_addr ? "(exact match)" : "") : "(failed)")));
        if (p != NULL) {
*** 6003,6014 ****
    test_log("%s, req_addr non-NULL with preexisting mapping:", __FUNCTION__);
    test_log("size align req_addr result");

    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
!     for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
!       char* const req_addr = align_ptr_up(mapping2, alignment);
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " -> " PTR_FORMAT " %s",
                 size, alignment, p2i(req_addr), p2i(p), ((p != NULL ? "" : "(failed)")));
        // as the area around req_addr contains already existing mappings, the API should always
        // return NULL (as per contract, it cannot return another address)
--- 6003,6014 ----
    test_log("%s, req_addr non-NULL with preexisting mapping:", __FUNCTION__);
    test_log("size align req_addr result");

    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
!     for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
!       char* const req_addr = align_up(mapping2, alignment);
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " -> " PTR_FORMAT " %s",
                 size, alignment, p2i(req_addr), p2i(p), ((p != NULL ? "" : "(failed)")));
        // as the area around req_addr contains already existing mappings, the API should always
        // return NULL (as per contract, it cannot return another address)
*** 6037,6048 ****
    test_log("test_reserve_memory_special_shm(" SIZE_FORMAT ", " SIZE_FORMAT ")", size, alignment);

    char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);

    if (addr != NULL) {
!     assert(is_ptr_aligned(addr, alignment), "Check");
!     assert(is_ptr_aligned(addr, os::large_page_size()), "Check");

      small_page_write(addr, size);

      os::Linux::release_memory_special_shm(addr, size);
    }
--- 6037,6048 ----
    test_log("test_reserve_memory_special_shm(" SIZE_FORMAT ", " SIZE_FORMAT ")", size, alignment);

    char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);

    if (addr != NULL) {
!     assert(is_aligned(addr, alignment), "Check");
!     assert(is_aligned(addr, os::large_page_size()), "Check");

      small_page_write(addr, size);

      os::Linux::release_memory_special_shm(addr, size);
    }
*** 6051,6061 ****
  static void test_reserve_memory_special_shm() {
    size_t lp = os::large_page_size();
    size_t ag = os::vm_allocation_granularity();

    for (size_t size = ag; size < lp * 3; size += ag) {
!     for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
        test_reserve_memory_special_shm(size, alignment);
      }
    }
  }
--- 6051,6061 ----
  static void test_reserve_memory_special_shm() {
    size_t lp = os::large_page_size();
    size_t ag = os::vm_allocation_granularity();

    for (size_t size = ag; size < lp * 3; size += ag) {
!     for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
        test_reserve_memory_special_shm(size, alignment);
      }
    }
  }
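Note on the rename: the hunks above only swap the old helpers (align_size_up, align_size_down, align_ptr_up, align_ptr_down, is_size_aligned, is_ptr_aligned) for the unified overloads align_up, align_down, and is_aligned; the rounding behaviour itself is unchanged. The stand-alone sketch below is not HotSpot's align.hpp, and the sketch_* names and the page constant are made up for illustration; it only demonstrates the power-of-two alignment semantics these call sites assume.

  // Minimal sketch of the assumed semantics, using hypothetical sketch_* names.
  #include <cassert>
  #include <cstdint>

  static uintptr_t sketch_align_down(uintptr_t value, uintptr_t alignment) {
    // Clear the low bits; 'alignment' is assumed to be a power of two.
    return value & ~(alignment - 1);
  }

  static uintptr_t sketch_align_up(uintptr_t value, uintptr_t alignment) {
    // Round up by adding (alignment - 1) and then aligning down.
    return sketch_align_down(value + alignment - 1, alignment);
  }

  static bool sketch_is_aligned(uintptr_t value, uintptr_t alignment) {
    // True when 'value' is an exact multiple of 'alignment'.
    return (value & (alignment - 1)) == 0;
  }

  int main() {
    const uintptr_t page = 4096;  // hypothetical page size
    assert(sketch_align_down(4097, page) == 4096);
    assert(sketch_align_up(4097, page) == 8192);
    assert(sketch_is_aligned(8192, page));
    assert(!sketch_is_aligned(8193, page));
    return 0;
  }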