src/hotspot/os/linux/os_linux.cpp

--- old version ---

3441 os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
3442 os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;
3443 os::Linux::numa_available_func_t os::Linux::_numa_available;
3444 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
3445 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
3446 os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
3447 os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
3448 os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
3449 os::Linux::numa_distance_func_t os::Linux::_numa_distance;
3450 os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;
3451 os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask;
3452 os::Linux::numa_move_pages_func_t os::Linux::_numa_move_pages;
3453 os::Linux::numa_set_preferred_func_t os::Linux::_numa_set_preferred;
3454 os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy;
3455 unsigned long* os::Linux::_numa_all_nodes;
3456 struct bitmask* os::Linux::_numa_all_nodes_ptr;
3457 struct bitmask* os::Linux::_numa_nodes_ptr;
3458 struct bitmask* os::Linux::_numa_interleave_bitmask;
3459 struct bitmask* os::Linux::_numa_membind_bitmask;
3460 
3461 bool os::pd_uncommit_memory(char* addr, size_t size) {
3462   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
3463                                      MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
3464   return res != (uintptr_t) MAP_FAILED;
3465 }
3466 
3467 static address get_stack_commited_bottom(address bottom, size_t size) {
3468   address nbot = bottom;
3469   address ntop = bottom + size;
3470 
3471   size_t page_sz = os::vm_page_size();
3472   unsigned pages = size / page_sz;
3473 
3474   unsigned char vec[1];
3475   unsigned imin = 1, imax = pages + 1, imid;
3476   int mincore_return_value = 0;
3477 
3478   assert(imin <= imax, "Unexpected page size");
3479 
3480   while (imin < imax) {
3481     imid = (imax + imin) / 2;


3626       ::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent));
3627     }
3628   }
3629 
3630   return os::commit_memory(addr, size, !ExecMem);
3631 }
3632 
3633 // If this is a growable mapping, remove the guard pages entirely by
3634 // munmap()ping them. If not, just call uncommit_memory(). This only
3635 // affects the main/primordial thread, but guard against future OS changes.
3636 // It's safe to always unmap the guard pages for the primordial thread
3637 // because we always place them right after the end of the mapped region.
3638 
3639 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3640   uintptr_t stack_extent, stack_base;
3641 
3642   if (os::is_primordial_thread()) {
3643     return ::munmap(addr, size) == 0;
3644   }
3645 
3646   return os::uncommit_memory(addr, size);
3647 }
3648 
3649 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
3650 // at 'requested_addr'. If there are existing memory mappings at the same
3651 // location, however, they will be overwritten. If 'fixed' is false,
3652 // 'requested_addr' is only treated as a hint; the return value may or
3653 // may not start from the requested address. Unlike Linux mmap(), this
3654 // function returns NULL to indicate failure.
3655 static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
3656   char * addr;
3657   int flags;
3658 
3659   flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
3660   if (fixed) {
3661     assert((uintptr_t)requested_addr % os::Linux::page_size() == 0, "unaligned address");
3662     flags |= MAP_FIXED;
3663   }
3664 
3665   // Map reserved/uncommitted pages PROT_NONE so we fail early if we
3666   // touch an uncommitted page. Otherwise, the read/write might


3701       char* const start_aligned = align_up(start, alignment);
3702       char* const end_aligned = start_aligned + bytes;
3703       char* const end = start + extra_size;
3704       if (start_aligned > start) {
3705         ::munmap(start, start_aligned - start);
3706       }
3707       if (end_aligned < end) {
3708         ::munmap(end_aligned, end - end_aligned);
3709       }
3710       start = start_aligned;
3711     }
3712   }
3713   return start;
3714 }
3715 
3716 static int anon_munmap(char * addr, size_t size) {
3717   return ::munmap(addr, size) == 0;
3718 }
3719 
3720 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
3721                             size_t alignment_hint) {
3722   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
3723 }
3724 
3725 bool os::pd_release_memory(char* addr, size_t size) {
3726   return anon_munmap(addr, size);
3727 }
3728 
3729 #ifdef CAN_SHOW_REGISTERS_ON_ASSERT
3730 extern char* g_assert_poison; // assertion poison page address
3731 #endif
3732 
3733 static bool linux_mprotect(char* addr, size_t size, int prot) {
3734   // Linux wants the mprotect address argument to be page aligned.
3735   char* bottom = (char*)align_down((intptr_t)addr, os::Linux::page_size());
3736 
3737   // According to SUSv3, mprotect() should only be used with mappings
3738   // established by mmap(), and mmap() always maps whole pages. Unaligned
3739 // 'addr' likely indicates a problem in the VM (e.g. trying to change
3740 // protection of malloc'ed or statically allocated memory). Check the
3741 // caller if you hit this assert.

--- new version ---

3441 os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
3442 os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;
3443 os::Linux::numa_available_func_t os::Linux::_numa_available;
3444 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
3445 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
3446 os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
3447 os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
3448 os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
3449 os::Linux::numa_distance_func_t os::Linux::_numa_distance;
3450 os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;
3451 os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask;
3452 os::Linux::numa_move_pages_func_t os::Linux::_numa_move_pages;
3453 os::Linux::numa_set_preferred_func_t os::Linux::_numa_set_preferred;
3454 os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy;
3455 unsigned long* os::Linux::_numa_all_nodes;
3456 struct bitmask* os::Linux::_numa_all_nodes_ptr;
3457 struct bitmask* os::Linux::_numa_nodes_ptr;
3458 struct bitmask* os::Linux::_numa_interleave_bitmask;
3459 struct bitmask* os::Linux::_numa_membind_bitmask;
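
These function-pointer statics are resolved at VM startup with dlopen()/dlsym() against libnuma, so the JVM still runs on systems without libnuma installed. A minimal sketch of that lookup pattern, assuming the usual libnuma soname; the helper name load_numa_max_node is illustrative, not HotSpot's:

    #include <dlfcn.h>
    #include <stddef.h>

    typedef int (*numa_max_node_func_t)(void);

    static numa_max_node_func_t load_numa_max_node() {
      // Resolve the symbol at runtime instead of linking against libnuma.
      void* handle = ::dlopen("libnuma.so.1", RTLD_LAZY);
      if (handle == NULL) {
        return NULL;  // no libnuma: fall back to non-NUMA behavior
      }
      return (numa_max_node_func_t) ::dlsym(handle, "numa_max_node");
    }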
3460 
3461 bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
3462   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
3463                                      MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
3464   return res != (uintptr_t) MAP_FAILED;
3465 }
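
pd_uncommit_memory gives back the backing store without releasing the address range: overlaying the range with a fresh PROT_NONE, MAP_NORESERVE anonymous mapping makes the kernel discard the pages while the reservation stays in place. A self-contained sketch of the full reserve/commit/uncommit/release cycle, outside HotSpot, using the same flags:

    #include <sys/mman.h>
    #include <stddef.h>

    int main() {
      const size_t sz = 4 * 4096;
      // Reserve: address space only; PROT_NONE makes stray touches fault.
      char* p = (char*) ::mmap(NULL, sz, PROT_NONE,
                               MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS, -1, 0);
      if (p == (char*) MAP_FAILED) return 1;
      // Commit: allow access; pages acquire backing on first touch.
      ::mprotect(p, sz, PROT_READ | PROT_WRITE);
      p[0] = 42;
      // Uncommit: remap PROT_NONE over the range, exactly as above.
      // Contents are discarded; the range remains reserved.
      ::mmap(p, sz, PROT_NONE,
             MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANONYMOUS, -1, 0);
      // Release: return the address range to the OS.
      ::munmap(p, sz);
      return 0;
    }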
3466 
3467 static address get_stack_commited_bottom(address bottom, size_t size) {
3468   address nbot = bottom;
3469   address ntop = bottom + size;
3470 
3471   size_t page_sz = os::vm_page_size();
3472   unsigned pages = size / page_sz;
3473 
3474   unsigned char vec[1];
3475   unsigned imin = 1, imax = pages + 1, imid;
3476   int mincore_return_value = 0;
3477 
3478   assert(imin <= imax, "Unexpected page size");
3479 
3480   while (imin < imax) {
3481     imid = (imax + imin) / 2;


3626       ::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent));
3627     }
3628   }
3629 
3630   return os::commit_memory(addr, size, !ExecMem);
3631 }
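
The binary search in get_stack_commited_bottom above (its loop body is elided here) is built on mincore(): the call succeeds for an address that belongs to a mapping and fails with errno == ENOMEM for one that does not, which distinguishes committed stack pages from uncommitted ones. A hedged sketch of that probe; probe_page_mapped is an illustrative name:

    #include <sys/mman.h>
    #include <stddef.h>

    static bool probe_page_mapped(char* page_addr, size_t page_sz) {
      unsigned char vec[1];
      // mincore() returns 0 when the page is part of a mapping and
      // -1 with errno == ENOMEM when it is not mapped at all.
      return ::mincore(page_addr, page_sz, vec) == 0;
    }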
3632 
3633 // If this is a growable mapping, remove the guard pages entirely by
3634 // munmap()ping them. If not, just call uncommit_memory(). This only
3635 // affects the main/primordial thread, but guard against future OS changes.
3636 // It's safe to always unmap the guard pages for the primordial thread
3637 // because we always place them right after the end of the mapped region.
3638 
3639 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3640   uintptr_t stack_extent, stack_base;
3641 
3642   if (os::is_primordial_thread()) {
3643     return ::munmap(addr, size) == 0;
3644   }
3645 
3646   return os::uncommit_memory(addr, size, !ExecMem);
3647 }
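
For context, a stack guard page is simply a PROT_NONE mapping pinned at the stack limit; whether removing it may munmap() outright depends on who created the mapping, which is exactly the branch above. A hedged sketch of both halves of that lifecycle, with illustrative helper names:

    #include <sys/mman.h>
    #include <stddef.h>

    // Install a PROT_NONE guard over [addr, addr+size); any access traps.
    static bool demo_create_guard(char* addr, size_t size) {
      return ::mmap(addr, size, PROT_NONE,
                    MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANONYMOUS,
                    -1, 0) != MAP_FAILED;
    }

    // For a guard region the VM mapped itself (the primordial-thread case
    // above), a plain munmap() removes it.
    static bool demo_remove_guard(char* addr, size_t size) {
      return ::munmap(addr, size) == 0;
    }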
3648 
3649 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
3650 // at 'requested_addr'. If there are existing memory mappings at the same
3651 // location, however, they will be overwritten. If 'fixed' is false,
3652 // 'requested_addr' is only treated as a hint; the return value may or
3653 // may not start from the requested address. Unlike Linux mmap(), this
3654 // function returns NULL to indicate failure.
3655 static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
3656   char * addr;
3657   int flags;
3658 
3659   flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
3660   if (fixed) {
3661     assert((uintptr_t)requested_addr % os::Linux::page_size() == 0, "unaligned address");
3662     flags |= MAP_FIXED;
3663   }
3664 
3665   // Map reserved/uncommitted pages PROT_NONE so we fail early if we
3666   // touch an uncommitted page. Otherwise, the read/write might


3701       char* const start_aligned = align_up(start, alignment);
3702       char* const end_aligned = start_aligned + bytes;
3703       char* const end = start + extra_size;
3704       if (start_aligned > start) {
3705         ::munmap(start, start_aligned - start);
3706       }
3707       if (end_aligned < end) {
3708         ::munmap(end_aligned, end - end_aligned);
3709       }
3710       start = start_aligned;
3711     }
3712   }
3713   return start;
3714 }
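
The trimming above is the standard over-allocate-and-trim idiom: to honor an alignment stronger than what mmap() guarantees, reserve bytes + alignment of address space, then munmap() the unaligned head and the unused tail. A self-contained sketch under that assumption (demo_reserve_aligned is an illustrative name; alignment must be a power of two):

    #include <sys/mman.h>
    #include <stdint.h>

    static char* demo_reserve_aligned(size_t bytes, size_t alignment) {
      const size_t extra_size = bytes + alignment;       // worst-case slack
      char* start = (char*) ::mmap(NULL, extra_size, PROT_NONE,
                                   MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS,
                                   -1, 0);
      if (start == (char*) MAP_FAILED) {
        return NULL;
      }
      // Round the base up to the requested alignment.
      char* const start_aligned =
          (char*) (((uintptr_t) start + alignment - 1) & ~(uintptr_t) (alignment - 1));
      char* const end_aligned = start_aligned + bytes;
      char* const end = start + extra_size;
      if (start_aligned > start) {
        ::munmap(start, start_aligned - start);          // unaligned head
      }
      if (end_aligned < end) {
        ::munmap(end_aligned, end - end_aligned);        // unused tail
      }
      return start_aligned;
    }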
3715 
3716 static int anon_munmap(char * addr, size_t size) {
3717   return ::munmap(addr, size) == 0;
3718 }
3719 
3720 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
3721                             size_t alignment_hint,
3722                             bool executable) {
3723   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
3724 }
3725 
3726 bool os::pd_release_memory(char* addr, size_t size) {
3727   return anon_munmap(addr, size);
3728 }
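
For reference, the call shape a consumer of the revised signatures would use (the new executable/exec booleans are the point of this change). This is an illustrative fragment, not an actual HotSpot call site:

    size_t bytes = 64 * 1024;
    char* base = os::pd_reserve_memory(bytes, NULL /* no preferred address */,
                                       0 /* alignment hint */,
                                       false /* not executable */);
    if (base != NULL) {
      // ... commit and use the range ...
      os::pd_uncommit_memory(base, bytes, false /* exec */);
      os::pd_release_memory(base, bytes);
    }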
3729 
3730 #ifdef CAN_SHOW_REGISTERS_ON_ASSERT
3731 extern char* g_assert_poison; // assertion poison page address
3732 #endif
3733 
3734 static bool linux_mprotect(char* addr, size_t size, int prot) {
3735   // Linux wants the mprotect address argument to be page aligned.
3736   char* bottom = (char*)align_down((intptr_t)addr, os::Linux::page_size());
3737 
3738   // According to SUSv3, mprotect() should only be used with mappings
3739   // established by mmap(), and mmap() always maps whole pages. Unaligned
3740 // 'addr' likely indicates a problem in the VM (e.g. trying to change
3741   // protection of malloc'ed or statically allocated memory). Check the
3742   // caller if you hit this assert.
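
The (truncated) body goes on to mprotect() the aligned bottom address. A standalone sketch of that page-alignment step, using sysconf() in place of HotSpot's page_size(); demo_mprotect is an illustrative name:

    #include <sys/mman.h>
    #include <unistd.h>
    #include <stdint.h>

    static bool demo_mprotect(char* addr, size_t size, int prot) {
      const uintptr_t page_sz = (uintptr_t) ::sysconf(_SC_PAGESIZE);
      // align_down: clear the low bits so bottom is page aligned.
      char* bottom = (char*) ((uintptr_t) addr & ~(page_sz - 1));
      // Grow the length so the original [addr, addr+size) stays covered.
      return ::mprotect(bottom, size + (addr - bottom), prot) == 0;
    }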

