
src/os/windows/vm/os_windows.cpp

rev 9751 : [mq]: webrev.00
rev 9752 : [mq]: webrev.01

@@ -3413,26 +3413,27 @@
 bool os::remove_stack_guard_pages(char* addr, size_t size) {
   return os::uncommit_memory(addr, size);
 }
 
 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
-  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
-  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
-
   uint count = 0;
   bool ret = false;
   size_t bytes_remaining = bytes;
   char * next_protect_addr = addr;
 
-  // Below split reflects address and size when we reserved the memory by allocate_pages_individually().
+  // Use VirtualQuery() to get the chunk size.
   while (bytes_remaining) {
-    size_t bytes_to_protect = MIN2(bytes_remaining, chunk_size - ((size_t)next_protect_addr % chunk_size));
+    MEMORY_BASIC_INFORMATION alloc_info;
+    VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info));
+    size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
     // We used a different API in allocate_pages_individually() based on UseNUMAInterleaving,
     // but we don't distinguish here, as both cases are protected by the same API.
     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
-    assert(ret, "Failed protecting chunk #" UINT32_FORMAT, count);
-    if (!ret) return false;
+    assert(ret, "Failed protecting chunk #%u", count);
+    if (!ret) {
+      return false;
+    }
 
     bytes_remaining -= bytes_to_protect;
     next_protect_addr += bytes_to_protect;
     count++;
   }
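
For readers unfamiliar with the Win32 call, here is a minimal standalone sketch (not part of the webrev, and independent of HotSpot's MIN2 and assert macros) of the same VirtualQuery()-driven chunking used by the new loop above. The helper name protect_range() and the 128 KB test range are illustrative only, not taken from the patch.

#include <windows.h>
#include <stdio.h>

static bool protect_range(char* addr, size_t bytes, DWORD new_protect) {
  size_t remaining = bytes;
  char*  next      = addr;
  while (remaining > 0) {
    MEMORY_BASIC_INFORMATION info;
    if (VirtualQuery(next, &info, sizeof(info)) == 0) {
      return false;  // query failed
    }
    // RegionSize is the extent, starting at the page containing 'next', over
    // which all pages share the same attributes, so a single VirtualProtect()
    // call never straddles separately allocated regions.
    size_t chunk = remaining < (size_t)info.RegionSize ? remaining
                                                       : (size_t)info.RegionSize;
    DWORD old_protect;
    if (!VirtualProtect(next, chunk, new_protect, &old_protect)) {
      return false;  // protecting this chunk failed
    }
    remaining -= chunk;
    next      += chunk;
  }
  return true;
}

int main() {
  // Commit a 128 KB read/write range, then flip the whole range to read-only
  // one VirtualQuery()-reported chunk at a time.
  const size_t len = 128 * 1024;
  char* p = (char*)VirtualAlloc(NULL, len, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (p == NULL) {
    return 1;
  }
  printf("protect ok: %d\n", protect_range(p, len, PAGE_READONLY) ? 1 : 0);
  VirtualFree(p, 0, MEM_RELEASE);
  return 0;
}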