
src/os/windows/vm/os_windows.cpp

rev 9751 : [mq]: webrev.00
rev 9752 : [mq]: webrev.01


3398     return true;
3399   }
3400   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3401   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3402   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3403 }
3404 
3405 bool os::pd_release_memory(char* addr, size_t bytes) {
3406   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3407 }
3408 
3409 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3410   return os::commit_memory(addr, size, !ExecMem);
3411 }
3412 
3413 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3414   return os::uncommit_memory(addr, size);
3415 }
3416 
3417 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3418   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
3419   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
3420 
3421   uint count = 0;
3422   bool ret = false;
3423   size_t bytes_remaining = bytes;
3424   char * next_protect_addr = addr;
3425 
3426   // The split below reflects the address and size used when the memory was reserved by allocate_pages_individually().
3427   while (bytes_remaining) {
3428     size_t bytes_to_protect = MIN2(bytes_remaining, chunk_size - ((size_t)next_protect_addr % chunk_size));
3429     // We used a different API in allocate_pages_individually() depending on UseNUMAInterleaving,
3430     // but we don't distinguish here, as both cases are protected by the same API.
3431     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3432     assert(ret, "Failed protecting chunk #" UINT32_FORMAT, count);
3433     if (!ret) return false;
3434 
3435     bytes_remaining -= bytes_to_protect;
3436     next_protect_addr += bytes_to_protect;
3437     count++;
3438   }
3439   return ret;
3440 }
3441 
3442 // Set protections specified
3443 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3444                         bool is_committed) {
3445   unsigned int p = 0;
3446   switch (prot) {
3447   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3448   case MEM_PROT_READ: p = PAGE_READONLY; break;
3449   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3450   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3451   default:
3452     ShouldNotReachHere();
3453   }
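
For context on the loop in the first revision of protect_pages_individually() above: a minimal standalone sketch (hypothetical values, not part of this webrev) of how the MIN2() expression sizes each VirtualProtect() call. The first chunk runs only up to the next chunk_size boundary, later chunks are chunk_size long, and the final chunk covers whatever remains.

    #include <cstdio>
    #include <cstddef>

    int main() {
      // Assumed values for illustration only: chunk_size stands in for the
      // allocation granularity or NUMA interleave granularity used above.
      const size_t chunk_size = 0x10000;
      size_t addr = 0x7f3000;          // starts 0x3000 past a chunk boundary
      size_t bytes_remaining = 0x25000;

      while (bytes_remaining > 0) {
        size_t room_in_chunk = chunk_size - (addr % chunk_size);
        size_t bytes_to_protect = bytes_remaining < room_in_chunk ? bytes_remaining
                                                                  : room_in_chunk;
        // In the real code this is where VirtualProtect() is called on the chunk.
        printf("protect 0x%zx bytes at 0x%zx\n", bytes_to_protect, addr);
        addr += bytes_to_protect;
        bytes_remaining -= bytes_to_protect;
      }
      return 0;
    }
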




3398     return true;
3399   }
3400   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3401   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3402   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3403 }
3404 
3405 bool os::pd_release_memory(char* addr, size_t bytes) {
3406   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3407 }
3408 
3409 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3410   return os::commit_memory(addr, size, !ExecMem);
3411 }
3412 
3413 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3414   return os::uncommit_memory(addr, size);
3415 }
3416 
3417 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3418   uint count = 0;
3419   bool ret = false;
3420   size_t bytes_remaining = bytes;
3421   char * next_protect_addr = addr;
3422 
3423   // Use VirtualQuery() to get the chunk size.
3424   while (bytes_remaining) {
3425     MEMORY_BASIC_INFORMATION alloc_info;
3426     VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info));
3427     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3428     // We used a different API in allocate_pages_individually() depending on UseNUMAInterleaving,
3429     // but we don't distinguish here, as both cases are protected by the same API.
3430     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3431     assert(ret, "Failed protecting chunk #%u", count);
3432     if (!ret) {
3433       return false;
3434     }
3435 
3436     bytes_remaining -= bytes_to_protect;
3437     next_protect_addr += bytes_to_protect;
3438     count++;
3439   }
3440   return ret;
3441 }
3442 
3443 // Set protections specified
3444 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3445                         bool is_committed) {
3446   unsigned int p = 0;
3447   switch (prot) {
3448   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3449   case MEM_PROT_READ: p = PAGE_READONLY; break;
3450   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3451   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3452   default:
3453     ShouldNotReachHere();
3454   }
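
The second revision replaces the computed chunk size with the RegionSize reported by VirtualQuery(). A minimal standalone sketch of that pattern (not part of this webrev; it uses a small private allocation rather than memory reserved by allocate_pages_individually()):

    #include <windows.h>
    #include <cstdio>

    int main() {
      // Hypothetical standalone allocation for illustration only.
      const size_t bytes = 4 * 4096;
      char* base = (char*)VirtualAlloc(NULL, bytes, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
      if (base == NULL) return 1;

      size_t bytes_remaining = bytes;
      char* next_protect_addr = base;
      while (bytes_remaining > 0) {
        // VirtualQuery() reports the size of the region, starting at
        // next_protect_addr, in which all pages share the same attributes.
        MEMORY_BASIC_INFORMATION alloc_info;
        VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info));
        size_t bytes_to_protect = bytes_remaining < (size_t)alloc_info.RegionSize
                                      ? bytes_remaining : (size_t)alloc_info.RegionSize;
        DWORD old_status;
        if (VirtualProtect(next_protect_addr, bytes_to_protect, PAGE_READONLY, &old_status) == 0) {
          return 1;
        }
        printf("protected 0x%zx bytes at %p\n", bytes_to_protect, (void*)next_protect_addr);
        bytes_remaining -= bytes_to_protect;
        next_protect_addr += bytes_to_protect;
      }
      VirtualFree(base, 0, MEM_RELEASE);
      return 0;
    }
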

