
src/hotspot/os/bsd/os_bsd.cpp

rev 51957 : 8224221: add memprotect calls to event log
Reviewed-by: dholmes, mdoerr
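
This change adds Events::log() calls at the three points in this file where page protections are changed: the OpenBSD mprotect() path of os::pd_commit_memory(), os::pd_uncommit_memory(), and bsd_mprotect(). Each protection change is thereby recorded in the VM event log, which is dumped, for example, into the "Events" section of hs_err files. The pattern of the added calls, reproduced (line-wrapped) from the new version below; addr, size and prot come from the surrounding function:

    Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "]"
                " with protection modes %x", p2i(addr), p2i(addr + size), prot);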


1939     ::close(fd);
1940     unlink(buf);
1941   }
1942 }
1943 
1944 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
1945                                     int err) {
1946   warning("INFO: os::commit_memory(" INTPTR_FORMAT ", " SIZE_FORMAT
1947           ", %d) failed; error='%s' (errno=%d)", (intptr_t)addr, size, exec,
1948            os::errno_name(err), err);
1949 }
1950 
1951 // NOTE: Bsd kernel does not really reserve the pages for us.
1952 //       All it does is to check if there are enough free pages
1953 //       left at the time of mmap(). This could be a potential
1954 //       problem.
1955 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
1956   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
1957 #ifdef __OpenBSD__
1958   // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD

1959   if (::mprotect(addr, size, prot) == 0) {
1960     return true;
1961   }
1962 #else
1963   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
1964                                      MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
1965   if (res != (uintptr_t) MAP_FAILED) {
1966     return true;
1967   }
1968 #endif
1969 
1970   // Warn about any commit errors we see in non-product builds just
1971   // in case mmap() doesn't work as described on the man page.
1972   NOT_PRODUCT(warn_fail_commit_memory(addr, size, exec, errno);)
1973 
1974   return false;
1975 }
1976 
1977 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
1978                           bool exec) {


2023 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2024   if (size > 0) {
2025     ids[0] = 0;
2026     return 1;
2027   }
2028   return 0;
2029 }
2030 
2031 bool os::get_page_info(char *start, page_info* info) {
2032   return false;
2033 }
2034 
2035 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2036   return end;
2037 }
2038 
2039 
2040 bool os::pd_uncommit_memory(char* addr, size_t size) {
2041 #ifdef __OpenBSD__
2042   // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD

2043   return ::mprotect(addr, size, PROT_NONE) == 0;
2044 #else
2045   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
2046                                      MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
2047   return res  != (uintptr_t) MAP_FAILED;
2048 #endif
2049 }
2050 
2051 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2052   return os::commit_memory(addr, size, !ExecMem);
2053 }
2054 
2055 // If this is a growable mapping, remove the guard pages entirely by
2056 // munmap()ping them.  If not, just call uncommit_memory().
2057 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2058   return os::uncommit_memory(addr, size);
2059 }
2060 
2061 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
2062 // at 'requested_addr'. If there are existing memory mappings at the same


2091                             size_t alignment_hint) {
2092   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
2093 }
2094 
2095 bool os::pd_release_memory(char* addr, size_t size) {
2096   return anon_munmap(addr, size);
2097 }
2098 
2099 static bool bsd_mprotect(char* addr, size_t size, int prot) {
2100   // Bsd wants the mprotect address argument to be page aligned.
2101   char* bottom = (char*)align_down((intptr_t)addr, os::Bsd::page_size());
2102 
2103   // According to SUSv3, mprotect() should only be used with mappings
2104   // established by mmap(), and mmap() always maps whole pages. Unaligned
2105   // 'addr' likely indicates problem in the VM (e.g. trying to change
2106   // protection of malloc'ed or statically allocated memory). Check the
2107   // caller if you hit this assert.
2108   assert(addr == bottom, "sanity check");
2109 
2110   size = align_up(pointer_delta(addr, bottom, 1) + size, os::Bsd::page_size());

2111   return ::mprotect(bottom, size, prot) == 0;
2112 }
2113 
2114 // Set protections specified
2115 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2116                         bool is_committed) {
2117   unsigned int p = 0;
2118   switch (prot) {
2119   case MEM_PROT_NONE: p = PROT_NONE; break;
2120   case MEM_PROT_READ: p = PROT_READ; break;
2121   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2122   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2123   default:
2124     ShouldNotReachHere();
2125   }
2126   // is_committed is unused.
2127   return bsd_mprotect(addr, bytes, p);
2128 }
2129 
2130 bool os::guard_memory(char* addr, size_t size) {




1939     ::close(fd);
1940     unlink(buf);
1941   }
1942 }
1943 
1944 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
1945                                     int err) {
1946   warning("INFO: os::commit_memory(" INTPTR_FORMAT ", " SIZE_FORMAT
1947           ", %d) failed; error='%s' (errno=%d)", (intptr_t)addr, size, exec,
1948            os::errno_name(err), err);
1949 }
1950 
1951 // NOTE: Bsd kernel does not really reserve the pages for us.
1952 //       All it does is to check if there are enough free pages
1953 //       left at the time of mmap(). This could be a potential
1954 //       problem.
1955 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
1956   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
1957 #ifdef __OpenBSD__
1958   // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
1959   Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
1960   if (::mprotect(addr, size, prot) == 0) {
1961     return true;
1962   }
1963 #else
1964   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
1965                                      MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
1966   if (res != (uintptr_t) MAP_FAILED) {
1967     return true;
1968   }
1969 #endif
1970 
1971   // Warn about any commit errors we see in non-product builds just
1972   // in case mmap() doesn't work as described on the man page.
1973   NOT_PRODUCT(warn_fail_commit_memory(addr, size, exec, errno);)
1974 
1975   return false;
1976 }
1977 
1978 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
1979                           bool exec) {


2024 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2025   if (size > 0) {
2026     ids[0] = 0;
2027     return 1;
2028   }
2029   return 0;
2030 }
2031 
2032 bool os::get_page_info(char *start, page_info* info) {
2033   return false;
2034 }
2035 
2036 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2037   return end;
2038 }
2039 
2040 
2041 bool os::pd_uncommit_memory(char* addr, size_t size) {
2042 #ifdef __OpenBSD__
2043   // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
2044   Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with PROT_NONE", p2i(addr), p2i(addr+size));
2045   return ::mprotect(addr, size, PROT_NONE) == 0;
2046 #else
2047   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
2048                                      MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
2049   return res  != (uintptr_t) MAP_FAILED;
2050 #endif
2051 }
2052 
2053 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2054   return os::commit_memory(addr, size, !ExecMem);
2055 }
2056 
2057 // If this is a growable mapping, remove the guard pages entirely by
2058 // munmap()ping them.  If not, just call uncommit_memory().
2059 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2060   return os::uncommit_memory(addr, size);
2061 }
2062 
2063 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
2064 // at 'requested_addr'. If there are existing memory mappings at the same


2093                             size_t alignment_hint) {
2094   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
2095 }
2096 
2097 bool os::pd_release_memory(char* addr, size_t size) {
2098   return anon_munmap(addr, size);
2099 }
2100 
2101 static bool bsd_mprotect(char* addr, size_t size, int prot) {
2102   // Bsd wants the mprotect address argument to be page aligned.
2103   char* bottom = (char*)align_down((intptr_t)addr, os::Bsd::page_size());
2104 
2105   // According to SUSv3, mprotect() should only be used with mappings
2106   // established by mmap(), and mmap() always maps whole pages. Unaligned
2107   // 'addr' likely indicates problem in the VM (e.g. trying to change
2108   // protection of malloc'ed or statically allocated memory). Check the
2109   // caller if you hit this assert.
2110   assert(addr == bottom, "sanity check");
2111 
2112   size = align_up(pointer_delta(addr, bottom, 1) + size, os::Bsd::page_size());
2113   Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
2114   return ::mprotect(bottom, size, prot) == 0;
2115 }
2116 
2117 // Set protections specified
2118 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2119                         bool is_committed) {
2120   unsigned int p = 0;
2121   switch (prot) {
2122   case MEM_PROT_NONE: p = PROT_NONE; break;
2123   case MEM_PROT_READ: p = PROT_READ; break;
2124   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2125   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2126   default:
2127     ShouldNotReachHere();
2128   }
2129   // is_committed is unused.
2130   return bsd_mprotect(addr, bytes, p);
2131 }
2132 
2133 bool os::guard_memory(char* addr, size_t size) {
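
As a concrete check of the alignment arithmetic in bsd_mprotect() above: with 4 KiB pages, an already page-aligned addr satisfies the assert (addr == bottom), and a request of 100 bytes is rounded up to one full page before ::mprotect() is called. Below is a minimal standalone sketch of the same arithmetic, assuming 4 KiB pages; align_down() and align_up() here are local stand-ins for the HotSpot helpers, not the real ones:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Local stand-ins for HotSpot's align_down/align_up (power-of-two alignment assumed).
    static uintptr_t align_down(uintptr_t value, uintptr_t alignment) {
      return value & ~(alignment - 1);
    }
    static uintptr_t align_up(uintptr_t value, uintptr_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const uintptr_t page_size = 4096;   // assumed page size
      uintptr_t addr = 0x10000;           // already page aligned, so the assert below holds
      uintptr_t size = 100;               // protection request covering part of one page

      uintptr_t bottom = align_down(addr, page_size);
      assert(addr == bottom);             // mirrors the sanity check in bsd_mprotect()

      // pointer_delta(addr, bottom, 1) is zero here; the size is rounded up to whole pages.
      uintptr_t rounded = align_up((addr - bottom) + size, page_size);
      std::printf("mprotect range: [0x%llx, 0x%llx), %llu bytes\n",
                  (unsigned long long)bottom,
                  (unsigned long long)(bottom + rounded),
                  (unsigned long long)rounded);
      return 0;
    }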


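Events::log() itself comes from HotSpot's shared event-log facility (utilities/events.hpp): it formats a message and stores it in a fixed-size ring buffer whose most recent entries are printed into hs_err files. Purely as an illustrative analogue of that idea, not HotSpot's implementation (all names below are invented for the sketch), a bounded event log can look like this:

    #include <cstdarg>
    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Toy ring-buffer event log: keeps the newest 'capacity' formatted messages.
    class SimpleEventLog {
      std::vector<std::string> _buf;   // fixed-capacity ring
      size_t _next;                    // slot that will be overwritten next
      size_t _count;                   // total number of events logged
     public:
      explicit SimpleEventLog(size_t capacity) : _buf(capacity), _next(0), _count(0) {}

      void log(const char* format, ...) {
        char msg[256];
        va_list ap;
        va_start(ap, format);
        vsnprintf(msg, sizeof(msg), format, ap);
        va_end(ap);
        _buf[_next] = msg;
        _next = (_next + 1) % _buf.size();
        _count++;
      }

      void print() const {
        size_t n = _count < _buf.size() ? _count : _buf.size();
        size_t start = (_next + _buf.size() - n) % _buf.size();
        std::printf("Events (%zu events):\n", n);
        for (size_t i = 0; i < n; i++) {
          std::printf("  %s\n", _buf[(start + i) % _buf.size()].c_str());
        }
      }
    };

    int main() {
      SimpleEventLog log(8);
      uintptr_t addr = 0x10000;   // illustrative address only
      uintptr_t size = 4096;
      int prot = 0x3;             // typically PROT_READ|PROT_WRITE on the BSDs
      log.log("Protecting memory [0x%llx,0x%llx] with protection modes %x",
              (unsigned long long)addr, (unsigned long long)(addr + size), prot);
      log.print();
      return 0;
    }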