src/os/windows/vm/os_windows.cpp (old version)

2993         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
2994         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
2995         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
2996       }
2997     }
2998 
2999     if (p_new == NULL) {
3000       // Free any allocated pages
3001       if (next_alloc_addr > p_buf) {
3002         // Some memory was committed so release it.
3003         size_t bytes_to_release = bytes - bytes_remaining;
3004         // NMT has yet to record any individual blocks, so it
3005       // needs to create a dummy 'reserve' record to match
3006         // the release.
3007         MemTracker::record_virtual_memory_reserve((address)p_buf,
3008                                                   bytes_to_release, CALLER_PC);
3009         os::release_memory(p_buf, bytes_to_release);
3010       }
3011 #ifdef ASSERT
3012       if (should_inject_error) {
3013         if (TracePageSizes && Verbose) {
3014           tty->print_cr("Reserving pages individually failed.");
3015         }
3016       }
3017 #endif
3018       return NULL;
3019     }
3020 
3021     bytes_remaining -= bytes_to_rq;
3022     next_alloc_addr += bytes_to_rq;
3023     count++;
3024   }
3025   // Although the memory is allocated individually, it is returned as one.
3026   // NMT records it as one block.
3027   if ((flags & MEM_COMMIT) != 0) {
3028     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3029   } else {
3030     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3031   }
3032 
3033   // made it this far, success
3034   return p_buf;
3035 }


3179 
3180 bool os::can_execute_large_page_memory() {
3181   return true;
3182 }
3183 
3184 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3185                                  bool exec) {
3186   assert(UseLargePages, "only for large pages");
3187 
3188   if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3189     return NULL; // Fall back to small pages.
3190   }
3191 
3192   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3193   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3194 
3195   // with large pages, there are two cases where we need to use Individual Allocation
3196   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3197   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3198   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3199     if (TracePageSizes && Verbose) {
3200       tty->print_cr("Reserving large pages individually.");
3201     }
3202     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3203     if (p_buf == NULL) {
3204       // give an appropriate warning message
3205       if (UseNUMAInterleaving) {
3206         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3207       }
3208       if (UseLargePagesIndividualAllocation) {
3209         warning("Individually allocated large pages failed, "
3210                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3211       }
3212       return NULL;
3213     }
3214 
3215     return p_buf;
3216 
3217   } else {
3218     if (TracePageSizes && Verbose) {
3219       tty->print_cr("Reserving large pages in a single large chunk.");
3220     }
3221     // normal policy: just allocate it all at once
3222     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3223     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3224     if (res != NULL) {
3225       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3226     }
3227 
3228     return res;
3229   }
3230 }
3231 
3232 bool os::release_memory_special(char* base, size_t bytes) {
3233   assert(base != NULL, "Sanity check");
3234   return release_memory(base, bytes);
3235 }
3236 
3237 void os::print_statistics() {
3238 }
3239 
3240 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {




src/os/windows/vm/os_windows.cpp (new version)

2993         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
2994         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
2995         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
2996       }
2997     }
2998 
2999     if (p_new == NULL) {
3000       // Free any allocated pages
3001       if (next_alloc_addr > p_buf) {
3002         // Some memory was committed so release it.
3003         size_t bytes_to_release = bytes - bytes_remaining;
3004         // NMT has yet to record any individual blocks, so it
3005       // needs to create a dummy 'reserve' record to match
3006         // the release.
3007         MemTracker::record_virtual_memory_reserve((address)p_buf,
3008                                                   bytes_to_release, CALLER_PC);
3009         os::release_memory(p_buf, bytes_to_release);
3010       }
3011 #ifdef ASSERT
3012       if (should_inject_error) {
3013         log_develop_info(pagesizes)("Reserving pages individually failed.");
3014       }
3015 #endif
3016       return NULL;
3017     }
3018 
3019     bytes_remaining -= bytes_to_rq;
3020     next_alloc_addr += bytes_to_rq;
3021     count++;
3022   }
3023   // Although the memory is allocated individually, it is returned as one.
3024   // NMT records it as one block.
3025   if ((flags & MEM_COMMIT) != 0) {
3026     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3027   } else {
3028     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3029   }
3030 
3031   // made it this far, success
3032   return p_buf;
3033 }
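
[Review note] The round-robin NUMA placement that allocate_pages_individually performs under UseNUMAInterleaving (node = count % node_count, with the whole partial allocation released if any chunk fails) can be illustrated with a small standalone Win32 sketch. This is only an illustration of the pattern, not the HotSpot code: it uses regular pages, reserves the whole range up front, and leaves out the large-page handling, the address hint, the error injection and the NMT bookkeeping seen above. The helper name alloc_interleaved is invented for the sketch; the Win32 calls themselves (GetNumaHighestNodeNumber, VirtualAlloc, VirtualAllocExNuma, VirtualFree) are standard.

#include <windows.h>
#include <stddef.h>

// Commit 'bytes' of memory in 'chunk'-sized pieces (chunk is assumed to be a
// multiple of the allocation granularity), preferring a different NUMA node
// for each successive chunk: node = count % node_count.
static char* alloc_interleaved(size_t bytes, size_t chunk) {
  ULONG highest_node = 0;
  if (!GetNumaHighestNodeNumber(&highest_node)) {
    return NULL;
  }
  DWORD node_count = (DWORD)highest_node + 1;

  // Reserve the whole range first so the committed chunks stay contiguous.
  char* base = (char*)VirtualAlloc(NULL, bytes, MEM_RESERVE, PAGE_READWRITE);
  if (base == NULL) {
    return NULL;
  }

  HANDLE proc = GetCurrentProcess();
  size_t done = 0;
  DWORD count = 0;
  while (done < bytes) {
    size_t bytes_to_rq = (bytes - done < chunk) ? (bytes - done) : chunk;
    DWORD node = count % node_count;  // round-robin across NUMA nodes
    void* p = VirtualAllocExNuma(proc, base + done, bytes_to_rq,
                                 MEM_COMMIT, PAGE_READWRITE, node);
    if (p == NULL) {
      // A chunk failed: give back everything, as the code above does,
      // rather than returning a half-built block.
      VirtualFree(base, 0, MEM_RELEASE);
      return NULL;
    }
    done += bytes_to_rq;
    count++;
  }
  return base;
}
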


3177 
3178 bool os::can_execute_large_page_memory() {
3179   return true;
3180 }
3181 
3182 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3183                                  bool exec) {
3184   assert(UseLargePages, "only for large pages");
3185 
3186   if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3187     return NULL; // Fall back to small pages.
3188   }
3189 
3190   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3191   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3192 
3193   // with large pages, there are two cases where we need to use Individual Allocation
3194   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3195   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3196   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3197     log_debug(pagesizes)("Reserving large pages individually.");
3198
3199     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3200     if (p_buf == NULL) {
3201       // give an appropriate warning message
3202       if (UseNUMAInterleaving) {
3203         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3204       }
3205       if (UseLargePagesIndividualAllocation) {
3206         warning("Individually allocated large pages failed, "
3207                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3208       }
3209       return NULL;
3210     }
3211 
3212     return p_buf;
3213 
3214   } else {
3215     log_debug(pagesizes)("Reserving large pages in a single large chunk.");
3216
3217     // normal policy: just allocate it all at once
3218     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3219     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3220     if (res != NULL) {
3221       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3222     }
3223 
3224     return res;
3225   }
3226 }
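
[Review note] The else branch above is the simple path: with MEM_LARGE_PAGES the reservation and commit must happen in a single VirtualAlloc call, and the size must be a multiple of the large-page minimum. Below is a minimal standalone sketch of that single-chunk path, assuming the process already holds the "Lock pages in memory" right (SeLockMemoryPrivilege) that large pages require; reserve_large_chunk is an invented name.

#include <windows.h>
#include <stddef.h>

// Reserve and commit 'bytes' of large-page memory in one shot, or return NULL
// so the caller can fall back to small pages.
static char* reserve_large_chunk(size_t bytes, bool exec) {
  SIZE_T lp = GetLargePageMinimum();
  if (lp == 0 || (bytes % lp) != 0) {
    return NULL;  // large pages unsupported, or size not large-page aligned
  }
  DWORD prot  = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
  return (char*)VirtualAlloc(NULL, bytes, flags, prot);
}
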
3227 
3228 bool os::release_memory_special(char* base, size_t bytes) {
3229   assert(base != NULL, "Sanity check");
3230   return release_memory(base, bytes);
3231 }
3232 
3233 void os::print_statistics() {
3234 }
3235 
3236 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
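
[Review note] With this change the page-size tracing goes through unified logging instead of the TracePageSizes/Verbose flags, so, given the pagesizes tag used above, the messages would be enabled with -Xlog:pagesizes=debug; the log_develop_info variant is, as far as I know, only compiled into non-product builds.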

