< prev index next >

src/os/bsd/vm/os_bsd.cpp

Print this page




2255                        flags, -1, 0);
2256 
2257   return addr == MAP_FAILED ? NULL : addr;
2258 }
2259 
// Unmap an anonymous mapping previously established with anon_mmap().
// Returns nonzero on success, 0 if munmap(2) failed.
static int anon_munmap(char * addr, size_t size) {
  const int rc = ::munmap(addr, size);
  return rc == 0;
}
2263 
2264 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
2265                             size_t alignment_hint) {
2266   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
2267 }
2268 
2269 bool os::pd_release_memory(char* addr, size_t size) {
2270   return anon_munmap(addr, size);
2271 }
2272 
2273 static bool bsd_mprotect(char* addr, size_t size, int prot) {
2274   // Bsd wants the mprotect address argument to be page aligned.
2275   char* bottom = (char*)align_size_down((intptr_t)addr, os::Bsd::page_size());
2276 
2277   // According to SUSv3, mprotect() should only be used with mappings
2278   // established by mmap(), and mmap() always maps whole pages. Unaligned
2279   // 'addr' likely indicates problem in the VM (e.g. trying to change
2280   // protection of malloc'ed or statically allocated memory). Check the
2281   // caller if you hit this assert.
2282   assert(addr == bottom, "sanity check");
2283 
2284   size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Bsd::page_size());
2285   return ::mprotect(bottom, size, prot) == 0;
2286 }
2287 
2288 // Set protections specified
2289 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2290                         bool is_committed) {
2291   unsigned int p = 0;
2292   switch (prot) {
2293   case MEM_PROT_NONE: p = PROT_NONE; break;
2294   case MEM_PROT_READ: p = PROT_READ; break;
2295   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2296   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2297   default:
2298     ShouldNotReachHere();
2299   }
2300   // is_committed is unused.
2301   return bsd_mprotect(addr, bytes, p);
2302 }
2303 
2304 bool os::guard_memory(char* addr, size_t size) {




2255                        flags, -1, 0);
2256 
2257   return addr == MAP_FAILED ? NULL : addr;
2258 }
2259 
// Tear down an anonymous mmap()'ed region; nonzero return means success.
static int anon_munmap(char * addr, size_t size) {
  return (::munmap(addr, size) == 0) ? 1 : 0;
}
2263 
2264 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
2265                             size_t alignment_hint) {
2266   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
2267 }
2268 
2269 bool os::pd_release_memory(char* addr, size_t size) {
2270   return anon_munmap(addr, size);
2271 }
2272 
2273 static bool bsd_mprotect(char* addr, size_t size, int prot) {
2274   // Bsd wants the mprotect address argument to be page aligned.
2275   char* bottom = (char*)align_down((intptr_t)addr, os::Bsd::page_size());
2276 
2277   // According to SUSv3, mprotect() should only be used with mappings
2278   // established by mmap(), and mmap() always maps whole pages. Unaligned
2279   // 'addr' likely indicates problem in the VM (e.g. trying to change
2280   // protection of malloc'ed or statically allocated memory). Check the
2281   // caller if you hit this assert.
2282   assert(addr == bottom, "sanity check");
2283 
2284   size = align_up(pointer_delta(addr, bottom, 1) + size, os::Bsd::page_size());
2285   return ::mprotect(bottom, size, prot) == 0;
2286 }
2287 
2288 // Set protections specified
2289 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2290                         bool is_committed) {
2291   unsigned int p = 0;
2292   switch (prot) {
2293   case MEM_PROT_NONE: p = PROT_NONE; break;
2294   case MEM_PROT_READ: p = PROT_READ; break;
2295   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2296   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2297   default:
2298     ShouldNotReachHere();
2299   }
2300   // is_committed is unused.
2301   return bsd_mprotect(addr, bytes, p);
2302 }
2303 
2304 bool os::guard_memory(char* addr, size_t size) {


< prev index next >