
src/hotspot/os/linux/os_linux.cpp

rev 51957 : 8224221: add memprotect calls to event log
Reviewed-by: dholmes, mdoerr


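The change below routes mprotect activity through HotSpot's event log, a small in-memory ring buffer of recent VM events that is printed, for example, into hs_err crash files, so recent protection changes become visible when diagnosing crashes. As a rough, self-contained sketch of that idea only, and not HotSpot's actual Events implementation, the class below (hypothetical names EventRing/log) keeps just the most recent N formatted messages:

    #include <cstdarg>
    #include <cstdio>
    #include <string>

    // Illustrative stand-in for an event log: a fixed-size ring buffer of
    // formatted messages; the oldest entry is overwritten once the buffer fills.
    class EventRing {
      static const int N = 20;            // small, bounded history
      std::string _buf[N];
      int _next;
     public:
      EventRing() : _next(0) {}
      void log(const char* fmt, ...) {
        char msg[256];
        va_list ap;
        va_start(ap, fmt);
        vsnprintf(msg, sizeof(msg), fmt, ap);
        va_end(ap);
        _buf[_next] = msg;                // overwrite the oldest slot
        _next = (_next + 1) % N;
      }
      void print() const {                // oldest to newest
        for (int i = 0; i < N; i++) {
          const std::string& s = _buf[(_next + i) % N];
          if (!s.empty()) std::printf("%s\n", s.c_str());
        }
      }
    };

Because the buffer is bounded, logging on every protection change stays cheap in memory, which is what makes adding a log call on the mprotect path below reasonable.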
3466                             size_t alignment_hint) {
3467   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
3468 }
3469 
3470 bool os::pd_release_memory(char* addr, size_t size) {
3471   return anon_munmap(addr, size);
3472 }
3473 
3474 static bool linux_mprotect(char* addr, size_t size, int prot) {
3475   // Linux wants the mprotect address argument to be page aligned.
3476   char* bottom = (char*)align_down((intptr_t)addr, os::Linux::page_size());
3477 
3478   // According to SUSv3, mprotect() should only be used with mappings
3479   // established by mmap(), and mmap() always maps whole pages. Unaligned
3480   // 'addr' likely indicates a problem in the VM (e.g. trying to change
3481   // protection of malloc'ed or statically allocated memory). Check the
3482   // caller if you hit this assert.
3483   assert(addr == bottom, "sanity check");
3484 
3485   size = align_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());

3486   return ::mprotect(bottom, size, prot) == 0;
3487 }
3488 
3489 // Set protections specified
3490 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3491                         bool is_committed) {
3492   unsigned int p = 0;
3493   switch (prot) {
3494   case MEM_PROT_NONE: p = PROT_NONE; break;
3495   case MEM_PROT_READ: p = PROT_READ; break;
3496   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
3497   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3498   default:
3499     ShouldNotReachHere();
3500   }
3501   // is_committed is unused.
3502   return linux_mprotect(addr, bytes, p);
3503 }
3504 
3505 bool os::guard_memory(char* addr, size_t size) {




3466                             size_t alignment_hint) {
3467   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
3468 }
3469 
3470 bool os::pd_release_memory(char* addr, size_t size) {
3471   return anon_munmap(addr, size);
3472 }
3473 
3474 static bool linux_mprotect(char* addr, size_t size, int prot) {
3475   // Linux wants the mprotect address argument to be page aligned.
3476   char* bottom = (char*)align_down((intptr_t)addr, os::Linux::page_size());
3477 
3478   // According to SUSv3, mprotect() should only be used with mappings
3479   // established by mmap(), and mmap() always maps whole pages. Unaligned
3480   // 'addr' likely indicates a problem in the VM (e.g. trying to change
3481   // protection of malloc'ed or statically allocated memory). Check the
3482   // caller if you hit this assert.
3483   assert(addr == bottom, "sanity check");
3484 
3485   size = align_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
3486   Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
3487   return ::mprotect(bottom, size, prot) == 0;
3488 }
3489 
3490 // Set protections specified
3491 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3492                         bool is_committed) {
3493   unsigned int p = 0;
3494   switch (prot) {
3495   case MEM_PROT_NONE: p = PROT_NONE; break;
3496   case MEM_PROT_READ: p = PROT_READ; break;
3497   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
3498   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3499   default:
3500     ShouldNotReachHere();
3501   }
3502   // is_committed is unused.
3503   return linux_mprotect(addr, bytes, p);
3504 }
3505 
3506 bool os::guard_memory(char* addr, size_t size) {
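For readers outside HotSpot, the page-alignment handling that linux_mprotect asserts and performs can be reproduced with plain POSIX calls. The sketch below (protect_range is a hypothetical helper, not a HotSpot function) rounds the start address down and the length up to whole pages before calling mprotect(), and applies it to an anonymous mmap()ed page, in line with the SUSv3 note in the comment above:

    #include <sys/mman.h>
    #include <unistd.h>
    #include <stdint.h>
    #include <stdio.h>

    // mprotect() operates on whole pages: align the start address down and
    // the length up to the page size before calling it.
    static bool protect_range(char* addr, size_t size, int prot) {
      size_t page = (size_t)sysconf(_SC_PAGESIZE);
      char* bottom = (char*)((uintptr_t)addr & ~((uintptr_t)page - 1));  // align_down
      size_t len = ((size_t)(addr - bottom) + size + page - 1) & ~(page - 1);  // align_up
      return ::mprotect(bottom, len, prot) == 0;
    }

    int main() {
      size_t page = (size_t)sysconf(_SC_PAGESIZE);
      // mprotect is only defined for mmap()ed memory, as the comment notes.
      char* p = (char*)mmap(NULL, page, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) return 1;
      p[0] = 'x';
      bool ok = protect_range(p, 16, PROT_READ);   // page is read-only from here on
      printf("mprotect %s\n", ok ? "succeeded" : "failed");
      munmap(p, page);
      return 0;
    }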

