1890
1891 // BSD allocates memory by pages, so the allocation granularity equals the page size.
1892 int os::vm_allocation_granularity() {
1893 assert(os::Bsd::page_size() != -1, "must call os::init");
1894 return os::Bsd::page_size();
1895 }
1896
// Print a diagnostic for a failed commit_memory() call. 'err' is the errno
// captured at the failing call site; 'exec' is printed via %d (bool promotes
// to int in the varargs call).
1897 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
1898 int err) {
1899 warning("INFO: os::commit_memory(" INTPTR_FORMAT ", " SIZE_FORMAT
1900 ", %d) failed; error='%s' (errno=%d)", (intptr_t)addr, size, exec,
1901 os::errno_name(err), err);
1902 }
1903
1904 // NOTE: Bsd kernel does not really reserve the pages for us.
1905 // All it does is to check if there are enough free pages
1906 // left at the time of mmap(). This could be a potential
1907 // problem.
// Commit (make accessible and backed) an already-reserved range
// [addr, addr+size). Returns true on success; failures are reported via
// warn_fail_commit_memory() in non-product builds.
1908 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
1909 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
1910 #ifdef __OpenBSD__
1911 // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
1912 Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
1913 if (::mprotect(addr, size, prot) == 0) {
1914 return true;
1915 }
1916 #else
// Re-mmap the range MAP_FIXED over the PROT_NONE reservation; this both
// changes the protection and lets the kernel back the pages.
1917 uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
1918 MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
1919 if (res != (uintptr_t) MAP_FAILED) {
1920 return true;
1921 }
1922 #endif
1923
1924 // Warn about any commit errors we see in non-product builds just
1925 // in case mmap() doesn't work as described on the man page.
1926 NOT_PRODUCT(warn_fail_commit_memory(addr, size, exec, errno);)
1927
1928 return false;
1929 }
1930
1931 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
1932 bool exec) {
1933 // alignment_hint is ignored on this OS
1934 return pd_commit_memory(addr, size, exec);
1935 }
1978 if (size > 0) {
1979 ids[0] = 0;
1980 return 1;
1981 }
1982 return 0;
1983 }
1984
1985 int os::numa_get_group_id_for_address(const void* address) {
1986 return 0;
1987 }
1988
1989 bool os::get_page_info(char *start, page_info* info) {
1990 return false;
1991 }
1992
1993 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
1994 return end;
1995 }
1996
1997
// Undo a commit: release the physical backing and make [addr, addr+size)
// inaccessible again while keeping the virtual address range reserved.
1998 bool os::pd_uncommit_memory(char* addr, size_t size) {
1999 #ifdef __OpenBSD__
2000 // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
2001 Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with PROT_NONE", p2i(addr), p2i(addr+size));
2002 return ::mprotect(addr, size, PROT_NONE) == 0;
2003 #else
// Replace the mapping with a fresh PROT_NONE, MAP_NORESERVE one: the pages
// are dropped but the virtual range remains reserved.
2004 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
2005 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
2006 return res != (uintptr_t) MAP_FAILED;
2007 #endif
2008 }
2009
2010 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2011 return os::commit_memory(addr, size, !ExecMem);
2012 }
2013
2014 // If this is a growable mapping, remove the guard pages entirely by
2015 // munmap()ping them. If not, just call uncommit_memory().
2016 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2017 return os::uncommit_memory(addr, size);
2018 }
2019
2020 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
2021 // at 'requested_addr'. If there are existing memory mappings at the same
2022 // location, however, they will be overwritten. If 'fixed' is false,
2023 // 'requested_addr' is only treated as a hint, the return value may or
2024 // may not start from the requested address. Unlike Bsd mmap(), this
2025 // function returns NULL to indicate failure.
2026 static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
2027 char * addr;
2028 int flags;
2029
2030 flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
2031 if (fixed) {
// MAP_FIXED requires a page-aligned address; an unaligned request here is a
// bug in the caller.
2032 assert((uintptr_t)requested_addr % os::Bsd::page_size() == 0, "unaligned address");
2033 flags |= MAP_FIXED;
2034 }
2035
2036 // Map reserved/uncommitted pages PROT_NONE so we fail early if we
2037 // touch an uncommitted page. Otherwise, the read/write might
2038 // succeed if we have enough swap space to back the physical page.
2039 addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
2040 flags, -1, 0);
2041
2042 return addr == MAP_FAILED ? NULL : addr;
2043 }
2044
// Release a region obtained from anon_mmap(). Returns nonzero (true) on
// success, zero on failure.
static int anon_munmap(char * addr, size_t size) {
  const int rc = ::munmap(addr, size);
  return rc == 0;
}
2048
2049 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
2050 size_t alignment_hint) {
2051 return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
2052 }
2053
2054 bool os::pd_release_memory(char* addr, size_t size) {
2055 return anon_munmap(addr, size);
2056 }
2057
2058 static bool bsd_mprotect(char* addr, size_t size, int prot) {
2059 // Bsd wants the mprotect address argument to be page aligned.
2060 char* bottom = (char*)align_down((intptr_t)addr, os::Bsd::page_size());
2061
2062 // According to SUSv3, mprotect() should only be used with mappings
2063 // established by mmap(), and mmap() always maps whole pages. Unaligned
2064 // 'addr' likely indicates problem in the VM (e.g. trying to change
2065 // protection of malloc'ed or statically allocated memory). Check the
2066 // caller if you hit this assert.
2067 assert(addr == bottom, "sanity check");
2068
2069 size = align_up(pointer_delta(addr, bottom, 1) + size, os::Bsd::page_size());
2070 Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
2071 return ::mprotect(bottom, size, prot) == 0;
2141 }
2142 return result;
2143 }
2144
2145 // Reserve memory at an arbitrary address, only if that area is
2146 // available (and not reserved for something else).
2147
2148 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2149 // Assert only that the size is a multiple of the page size, since
2150 // that's all that mmap requires, and since that's all we really know
2151 // about at this low abstraction level. If we need higher alignment,
2152 // we can either pass an alignment to this method or verify alignment
2153 // in one of the methods further up the call chain. See bug 5044738.
2154 assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2155
2156 // Repeatedly allocate blocks until the block is allocated at the
2157 // right spot.
2158
2159 // Bsd mmap allows caller to pass an address as hint; give it a try first,
2160 // if kernel honors the hint then we can return immediately.
2161 char * addr = anon_mmap(requested_addr, bytes, false);
2162 if (addr == requested_addr) {
2163 return requested_addr;
2164 }
2165
2166 if (addr != NULL) {
2167 // mmap() is successful but it fails to reserve at the requested address
2168 anon_munmap(addr, bytes);
2169 }
2170
2171 return NULL;
2172 }
2173
2174 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2175 void os::infinite_sleep() {
2176 while (true) { // sleep forever ...
2177 ::sleep(100); // ... 100 seconds at a time
2178 }
2179 }
2180
2181 // Used to convert frequent JVM_Yield() to nops
|
1890
1891 // BSD allocates memory by pages, so the allocation granularity equals the page size.
1892 int os::vm_allocation_granularity() {
1893 assert(os::Bsd::page_size() != -1, "must call os::init");
1894 return os::Bsd::page_size();
1895 }
1896
// Print a diagnostic for a failed commit_memory() call. 'err' is the errno
// captured at the failing call site; 'exec' is printed via %d (bool promotes
// to int in the varargs call).
1897 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
1898 int err) {
1899 warning("INFO: os::commit_memory(" INTPTR_FORMAT ", " SIZE_FORMAT
1900 ", %d) failed; error='%s' (errno=%d)", (intptr_t)addr, size, exec,
1901 os::errno_name(err), err);
1902 }
1903
1904 // NOTE: Bsd kernel does not really reserve the pages for us.
1905 // All it does is to check if there are enough free pages
1906 // left at the time of mmap(). This could be a potential
1907 // problem.
// Commit (make accessible and backed) an already-reserved range
// [addr, addr+size). Returns true on success; failures are reported via
// warn_fail_commit_memory() in non-product builds.
1908 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
1909 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
1910 #if defined(__OpenBSD__)
1911 // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
1912 Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
1913 if (::mprotect(addr, size, prot) == 0) {
1914 return true;
1915 }
1916 #elif defined(__APPLE__)
// NOTE(review): macOS uses mprotect() here rather than a fresh MAP_FIXED
// mmap(), presumably to stay compatible with MAP_JIT reservations made in
// anon_mmap() -- confirm against the macOS mmap(2) MAP_JIT restrictions.
1917 if (::mprotect(addr, size, prot) == 0) {
1918 return true;
1919 }
1920 #else
// Re-mmap the range MAP_FIXED over the PROT_NONE reservation; this both
// changes the protection and lets the kernel back the pages.
1921 uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
1922 MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
1923 if (res != (uintptr_t) MAP_FAILED) {
1924 return true;
1925 }
1926 #endif
1927
1928 // Warn about any commit errors we see in non-product builds just
1929 // in case mmap() doesn't work as described on the man page.
1930 NOT_PRODUCT(warn_fail_commit_memory(addr, size, exec, errno);)
1931
1932 return false;
1933 }
1934
1935 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
1936 bool exec) {
1937 // alignment_hint is ignored on this OS
1938 return pd_commit_memory(addr, size, exec);
1939 }
1982 if (size > 0) {
1983 ids[0] = 0;
1984 return 1;
1985 }
1986 return 0;
1987 }
1988
1989 int os::numa_get_group_id_for_address(const void* address) {
1990 return 0;
1991 }
1992
1993 bool os::get_page_info(char *start, page_info* info) {
1994 return false;
1995 }
1996
1997 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
1998 return end;
1999 }
2000
2001
// Undo a commit: release the physical backing and make [addr, addr+size)
// inaccessible again while keeping the virtual address range reserved.
// 'exec' must match the flag the range was committed with.
2002 bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
2003 #if defined(__OpenBSD__)
2004 // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
2005 Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with PROT_NONE", p2i(addr), p2i(addr+size));
2006 return ::mprotect(addr, size, PROT_NONE) == 0;
2007 #elif defined(__APPLE__)
2008 if (exec) {
// NOTE(review): executable ranges are presumably MAP_JIT-backed (see
// anon_mmap), which cannot be replaced via MAP_FIXED mmap(); release the
// pages with madvise(MADV_FREE) and then drop access with mprotect --
// confirm against macOS MAP_JIT documentation.
2009 if (::madvise(addr, size, MADV_FREE) != 0) {
2010 return false;
2011 }
2012 return ::mprotect(addr, size, PROT_NONE) == 0;
2013 } else {
2014 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
2015 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
2016 return res != (uintptr_t) MAP_FAILED;
2017 }
2018 #else
// Replace the mapping with a fresh PROT_NONE, MAP_NORESERVE one: the pages
// are dropped but the virtual range remains reserved.
2019 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
2020 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
2021 return res != (uintptr_t) MAP_FAILED;
2022 #endif
2023 }
2024
2025 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2026 return os::commit_memory(addr, size, !ExecMem);
2027 }
2028
2029 // If this is a growable mapping, remove the guard pages entirely by
2030 // munmap()ping them. If not, just call uncommit_memory().
2031 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2032 return os::uncommit_memory(addr, size, !ExecMem);
2033 }
2034
2035 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
2036 // at 'requested_addr'. If there are existing memory mappings at the same
2037 // location, however, they will be overwritten. If 'fixed' is false,
2038 // 'requested_addr' is only treated as a hint, the return value may or
2039 // may not start from the requested address. Unlike Bsd mmap(), this
2040 // function returns NULL to indicate failure.
// 'executable' requests a mapping suitable for JIT code; on macOS that adds
// MAP_JIT, which the guarantee() below documents as incompatible with a
// fixed-address mapping.
2041 static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed, bool executable) {
2042 char * addr;
2043 int flags;
2044
2045 flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
2046 #ifdef __APPLE__
2047 if (executable) {
2048 guarantee(!fixed, "MAP_JIT (for execute) is incompatible with MAP_FIXED");
2049 flags |= MAP_JIT;
2050 }
2051 #endif
2052 if (fixed) {
// MAP_FIXED requires a page-aligned address; an unaligned request here is a
// bug in the caller.
2053 assert((uintptr_t)requested_addr % os::Bsd::page_size() == 0, "unaligned address");
2054 flags |= MAP_FIXED;
2055 }
2056
2057 // Map reserved/uncommitted pages PROT_NONE so we fail early if we
2058 // touch an uncommitted page. Otherwise, the read/write might
2059 // succeed if we have enough swap space to back the physical page.
2060 addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
2061 flags, -1, 0);
2062
2063 return addr == MAP_FAILED ? NULL : addr;
2064 }
2065
// Release a region obtained from anon_mmap(). Returns nonzero (true) on
// success, zero on failure.
static int anon_munmap(char * addr, size_t size) {
  const int rc = ::munmap(addr, size);
  return rc == 0;
}
2069
2070 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
2071 size_t alignment_hint,
2072 bool executable) {
2073 return anon_mmap(requested_addr, bytes, (requested_addr != NULL), executable);
2074 }
2075
2076 bool os::pd_release_memory(char* addr, size_t size) {
2077 return anon_munmap(addr, size);
2078 }
2079
2080 static bool bsd_mprotect(char* addr, size_t size, int prot) {
2081 // Bsd wants the mprotect address argument to be page aligned.
2082 char* bottom = (char*)align_down((intptr_t)addr, os::Bsd::page_size());
2083
2084 // According to SUSv3, mprotect() should only be used with mappings
2085 // established by mmap(), and mmap() always maps whole pages. Unaligned
2086 // 'addr' likely indicates problem in the VM (e.g. trying to change
2087 // protection of malloc'ed or statically allocated memory). Check the
2088 // caller if you hit this assert.
2089 assert(addr == bottom, "sanity check");
2090
2091 size = align_up(pointer_delta(addr, bottom, 1) + size, os::Bsd::page_size());
2092 Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
2093 return ::mprotect(bottom, size, prot) == 0;
2163 }
2164 return result;
2165 }
2166
2167 // Reserve memory at an arbitrary address, only if that area is
2168 // available (and not reserved for something else).
2169
2170 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2171 // Assert only that the size is a multiple of the page size, since
2172 // that's all that mmap requires, and since that's all we really know
2173 // about at this low abstraction level. If we need higher alignment,
2174 // we can either pass an alignment to this method or verify alignment
2175 // in one of the methods further up the call chain. See bug 5044738.
2176 assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2177
2178 // Repeatedly allocate blocks until the block is allocated at the
2179 // right spot.
2180
2181 // Bsd mmap allows caller to pass an address as hint; give it a try first,
2182 // if kernel honors the hint then we can return immediately.
2183 char * addr = anon_mmap(requested_addr, bytes, false/*fixed*/, false/*executable*/);
2184 if (addr == requested_addr) {
2185 return requested_addr;
2186 }
2187
2188 if (addr != NULL) {
2189 // mmap() is successful but it fails to reserve at the requested address
2190 anon_munmap(addr, bytes);
2191 }
2192
2193 return NULL;
2194 }
2195
2196 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2197 void os::infinite_sleep() {
2198 while (true) { // sleep forever ...
2199 ::sleep(100); // ... 100 seconds at a time
2200 }
2201 }
2202
2203 // Used to convert frequent JVM_Yield() to nops
|