src/hotspot/os/windows/os_windows.cpp


Old version of the changed region:

3240     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3241     if (extra_base == NULL) {
3242       return NULL;
3243     }
3244     // Do manual alignment
3245     aligned_base = align_up(extra_base, alignment);
3246 
3247     if (file_desc != -1) {
3248       os::unmap_memory(extra_base, extra_size);
3249     } else {
3250       os::release_memory(extra_base, extra_size);
3251     }
3252 
3253     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3254 
3255   } while (aligned_base == NULL);
3256 
3257   return aligned_base;
3258 }
3259 
3260 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3261   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3262          "reserve alignment");
3263   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3264   char* res;
3265   // note that if UseLargePages is on, all the areas that require interleaving
3266   // will go thru reserve_memory_special rather than thru here.
3267   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3268   if (!use_individual) {
3269     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3270   } else {
3271     elapsedTimer reserveTimer;
3272     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3273     // in numa interleaving, we have to allocate pages individually
3274     // (well really chunks of NUMAInterleaveGranularity size)
3275     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3276     if (res == NULL) {
3277       warning("NUMA page allocation failed");
3278     }
3279     if (Verbose && PrintMiscellaneous) {
3280       reserveTimer.stop();
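
The hunk above implements aligned reservation by over-reserving, releasing the block, and immediately re-reserving at the computed aligned address, looping because another thread can grab the freed range before the second reserve lands. Below is a minimal standalone sketch of that over-reserve-and-realign idea using raw Win32 calls; it covers only the anonymous-memory case (file_desc == -1), assumes alignment is a power of two that is a multiple of the allocation granularity, and the helper name is illustrative, not the HotSpot implementation.

#include <windows.h>
#include <stdint.h>

// Illustrative sketch only.
static char* reserve_aligned_sketch(size_t size, size_t alignment) {
  for (;;) {
    // Over-reserve so that an aligned start address is guaranteed to exist
    // somewhere inside the reservation.
    size_t extra_size = size + alignment;
    char* extra_base = (char*)VirtualAlloc(NULL, extra_size, MEM_RESERVE, PAGE_READWRITE);
    if (extra_base == NULL) {
      return NULL;
    }

    // First alignment-aligned address inside [extra_base, extra_base + extra_size).
    char* aligned_base = (char*)(((uintptr_t)extra_base + alignment - 1) & ~(uintptr_t)(alignment - 1));

    // Windows cannot release a sub-range of a reservation, so give the whole
    // block back and immediately try to re-reserve exactly size bytes at the
    // aligned address.
    VirtualFree(extra_base, 0, MEM_RELEASE);
    char* res = (char*)VirtualAlloc(aligned_base, size, MEM_RESERVE, PAGE_READWRITE);

    // Between the release and the re-reserve another thread may have taken the
    // range; in that case res is NULL and the whole dance is retried.
    if (res != NULL) {
      return res;
    }
  }
}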


3443   // alignment_hint is ignored on this OS
3444   return pd_commit_memory(addr, size, exec);
3445 }
3446 
3447 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3448                                   const char* mesg) {
3449   assert(mesg != NULL, "mesg must be specified");
3450   if (!pd_commit_memory(addr, size, exec)) {
3451     warn_fail_commit_memory(addr, size, exec);
3452     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3453   }
3454 }
3455 
3456 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3457                                   size_t alignment_hint, bool exec,
3458                                   const char* mesg) {
3459   // alignment_hint is ignored on this OS
3460   pd_commit_memory_or_exit(addr, size, exec, mesg);
3461 }
3462 
3463 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3464   if (bytes == 0) {
3465     // Don't bother the OS with noops.
3466     return true;
3467   }
3468   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3469   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3470   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3471 }
3472 
3473 bool os::pd_release_memory(char* addr, size_t bytes) {
3474   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3475 }
3476 
3477 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3478   return os::commit_memory(addr, size, !ExecMem);
3479 }
3480 
3481 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3482   return os::uncommit_memory(addr, size);
3483 }
3484 
3485 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3486   uint count = 0;
3487   bool ret = false;
3488   size_t bytes_remaining = bytes;
3489   char * next_protect_addr = addr;
3490 
3491   // Use VirtualQuery() to get the chunk size.
3492   while (bytes_remaining) {
3493     MEMORY_BASIC_INFORMATION alloc_info;
3494     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3495       return false;
3496     }
3497 
3498     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3499     // We used different API at allocate_pages_individually() based on UseNUMAInterleaving,
3500     // but we don't distinguish here as both cases are protected by same API.
3501     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3502     warning("Failed protecting pages individually for chunk #%u", count);
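
pd_uncommit_memory and pd_release_memory above map onto the two modes of VirtualFree: MEM_DECOMMIT drops the physical backing for an explicit byte range while keeping the address range reserved, whereas MEM_RELEASE returns the whole reservation and requires a size of 0 and the original base address (which is why pd_release_memory ignores 'bytes'). A small self-contained sketch of that difference, independent of the HotSpot wrappers:

#include <windows.h>

int main(void) {
  const size_t sz = 64 * 1024;

  // Reserve address space, then commit backing store for it.
  char* base = (char*)VirtualAlloc(NULL, sz, MEM_RESERVE, PAGE_READWRITE);
  if (base == NULL) return 1;
  if (VirtualAlloc(base, sz, MEM_COMMIT, PAGE_READWRITE) == NULL) return 1;

  // MEM_DECOMMIT, as in pd_uncommit_memory: the pages lose their backing
  // store but the address range stays reserved and can be re-committed later.
  if (VirtualFree(base, sz, MEM_DECOMMIT) == 0) return 1;

  // MEM_RELEASE, as in pd_release_memory: the size must be 0 and the address
  // must be the base returned by the original MEM_RESERVE call.
  if (VirtualFree(base, 0, MEM_RELEASE) == 0) return 1;
  return 0;
}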


New version of the same region: pd_reserve_memory gains a trailing bool executable parameter, and pd_uncommit_memory / the os::uncommit_memory call site gain an exec flag.

3240     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3241     if (extra_base == NULL) {
3242       return NULL;
3243     }
3244     // Do manual alignment
3245     aligned_base = align_up(extra_base, alignment);
3246 
3247     if (file_desc != -1) {
3248       os::unmap_memory(extra_base, extra_size);
3249     } else {
3250       os::release_memory(extra_base, extra_size);
3251     }
3252 
3253     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3254 
3255   } while (aligned_base == NULL);
3256 
3257   return aligned_base;
3258 }
3259 
3260 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint, bool executable) {
3261   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3262          "reserve alignment");
3263   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3264   char* res;
3265   // note that if UseLargePages is on, all the areas that require interleaving
3266   // will go thru reserve_memory_special rather than thru here.
3267   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3268   if (!use_individual) {
3269     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3270   } else {
3271     elapsedTimer reserveTimer;
3272     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3273     // in numa interleaving, we have to allocate pages individually
3274     // (well really chunks of NUMAInterleaveGranularity size)
3275     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3276     if (res == NULL) {
3277       warning("NUMA page allocation failed");
3278     }
3279     if (Verbose && PrintMiscellaneous) {
3280       reserveTimer.stop();
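
In this updated version pd_reserve_memory carries a trailing bool executable parameter, yet in the lines shown the reservation itself still passes PAGE_READWRITE to MEM_RESERVE; reserved-but-uncommitted pages are inaccessible regardless of the protection given at reserve time, so an exec flag only becomes meaningful once the memory is committed. The fragment below is an illustrative sketch only (the real pd_commit_memory lies outside this excerpt) of how such a flag typically selects the Win32 protection at commit time:

#include <windows.h>

// Illustrative only: names and behavior are assumptions, not the patch's
// actual pd_commit_memory.
static char* commit_sketch(char* addr, size_t bytes, bool exec) {
  DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  return (char*)VirtualAlloc(addr, bytes, MEM_COMMIT, prot);
}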


3443   // alignment_hint is ignored on this OS
3444   return pd_commit_memory(addr, size, exec);
3445 }
3446 
3447 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3448                                   const char* mesg) {
3449   assert(mesg != NULL, "mesg must be specified");
3450   if (!pd_commit_memory(addr, size, exec)) {
3451     warn_fail_commit_memory(addr, size, exec);
3452     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3453   }
3454 }
3455 
3456 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3457                                   size_t alignment_hint, bool exec,
3458                                   const char* mesg) {
3459   // alignment_hint is ignored on this OS
3460   pd_commit_memory_or_exit(addr, size, exec, mesg);
3461 }
3462 
3463 bool os::pd_uncommit_memory(char* addr, size_t bytes, bool exec) {
3464   if (bytes == 0) {
3465     // Don't bother the OS with noops.
3466     return true;
3467   }
3468   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3469   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3470   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3471 }
3472 
3473 bool os::pd_release_memory(char* addr, size_t bytes) {
3474   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3475 }
3476 
3477 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3478   return os::commit_memory(addr, size, !ExecMem);
3479 }
3480 
3481 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3482   return os::uncommit_memory(addr, size, !ExecMem);
3483 }
3484 
3485 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3486   uint count = 0;
3487   bool ret = false;
3488   size_t bytes_remaining = bytes;
3489   char * next_protect_addr = addr;
3490 
3491   // Use VirtualQuery() to get the chunk size.
3492   while (bytes_remaining) {
3493     MEMORY_BASIC_INFORMATION alloc_info;
3494     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3495       return false;
3496     }
3497 
3498     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3499     // We used different API at allocate_pages_individually() based on UseNUMAInterleaving,
3500     // but we don't distinguish here as both cases are protected by same API.
3501     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3502     warning("Failed protecting pages individually for chunk #%u", count);
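
The excerpt of protect_pages_individually is cut off just after the VirtualProtect call, so the failure handling around the warning() line is not visible here. The function exists because VirtualProtect requires all affected pages to lie within a single reservation, while the NUMA-interleaving path reserves the range chunk by chunk; a complete, self-contained sketch of that VirtualQuery-driven walk (illustrative names, not the HotSpot code) looks like this:

#include <windows.h>

// Illustrative sketch of the chunked-protection idea.
static bool protect_range_in_chunks(char* addr, size_t bytes, DWORD prot) {
  char* next = addr;
  size_t remaining = bytes;
  while (remaining > 0) {
    // Ask the OS how large the region containing 'next' is.
    MEMORY_BASIC_INFORMATION info;
    if (VirtualQuery(next, &info, sizeof(info)) == 0) {
      return false;  // cannot even query the region
    }
    // Protect at most one region (one individually reserved chunk) at a time.
    size_t chunk = remaining < (size_t)info.RegionSize ? remaining : (size_t)info.RegionSize;
    DWORD old_prot;
    if (VirtualProtect(next, chunk, prot, &old_prot) == 0) {
      return false;  // protection change failed for this chunk
    }
    next += chunk;
    remaining -= chunk;
  }
  return true;
}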

