src/os/windows/vm/os_windows.cpp




2830   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2831   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2832 
2833   // First reserve enough address space in advance, since we want to be
2834   // able to break a single contiguous virtual address range into multiple
2835   // large page commits, but WS2003 does not allow reserving large page space.
2836   // So we just use 4K pages for the reserve; this gives us a legal contiguous
2837   // address space. Then we will deallocate that reservation and re-allocate
2838   // using large pages.
2839   const size_t size_of_reserve = bytes + chunk_size;
2840   if (bytes > size_of_reserve) {
2841     // Overflowed.
2842     return NULL;
2843   }
2844   p_buf = (char *) VirtualAlloc(addr,
2845                                 size_of_reserve,  // size of Reserve
2846                                 MEM_RESERVE,
2847                                 PAGE_READWRITE);
2848   // If reservation failed, return NULL
2849   if (p_buf == NULL) return NULL;
2850   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);


2851   os::release_memory(p_buf, bytes + chunk_size);
2852 
2853   // we still need to round up to a page boundary (in case we are using large pages)
2854   // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
2855   // instead we handle this in the bytes_to_rq computation below
2856   p_buf = (char *) align_size_up((size_t)p_buf, page_size);
2857 
2858   // now go through and allocate one chunk at a time until all bytes are
2859   // allocated
2860   size_t  bytes_remaining = bytes;
2861   // An overflow of align_size_up() would have been caught above
2862   // in the calculation of size_of_reserve.
2863   char * next_alloc_addr = p_buf;
2864   HANDLE hProc = GetCurrentProcess();
2865 
2866 #ifdef ASSERT
2867   // Variable for the failure injection
2868   long ran_num = os::random();
2869   size_t fail_after = ran_num % bytes;
2870 #endif


2895         // get the next node to use from the used_node_list
2896         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
2897         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
2898         p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc,
2899                                                             next_alloc_addr,
2900                                                             bytes_to_rq,
2901                                                             flags,
2902                                                             prot,
2903                                                             node);
2904       }
2905     }
2906 
2907     if (p_new == NULL) {
2908       // Free any allocated pages
2909       if (next_alloc_addr > p_buf) {
2910         // Some memory was committed so release it.
2911         size_t bytes_to_release = bytes - bytes_remaining;
2912         // NMT has yet to record any individual blocks, so it
2913         // needs to create a dummy 'reserve' record to match
2914         // the release.
2915         MemTracker::record_virtual_memory_reserve((address)p_buf,
2916           bytes_to_release, CALLER_PC);

2917         os::release_memory(p_buf, bytes_to_release);
2918       }
2919 #ifdef ASSERT
2920       if (should_inject_error) {
2921         if (TracePageSizes && Verbose) {
2922           tty->print_cr("Reserving pages individually failed.");
2923         }
2924       }
2925 #endif
2926       return NULL;
2927     }
2928 
2929     bytes_remaining -= bytes_to_rq;
2930     next_alloc_addr += bytes_to_rq;
2931     count++;
2932   }
2933   // Although the memory is allocated individually, it is returned as one.
2934   // NMT records it as one block.
2935   address pc = CALLER_PC;
2936   MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, pc);
2937   if ((flags & MEM_COMMIT) != 0) {
2938     MemTracker::record_virtual_memory_commit((address)p_buf, bytes, pc);
2939   }
2940 
2941   // made it this far, success
2942   return p_buf;
2943 }
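
For readers less familiar with the Windows memory APIs, the reserve/release/re-allocate dance described by the comments at lines 2833-2838 can be sketched outside of HotSpot roughly as follows. This is only an illustration of the pattern, not the webrev's code: the helper name is made up, NMT bookkeeping, NUMA placement and the debug failure injection are omitted, and `chunk` is assumed to be a power of two, a multiple of GetLargePageMinimum(), with `bytes` a multiple of `chunk`.

    #include <windows.h>
    #include <cstddef>

    // Illustrative sketch only (not HotSpot code): reserve a contiguous range
    // with ordinary small pages just to obtain an address, release it, then
    // re-allocate the same range one chunk at a time with large pages.
    static char* reserve_then_commit_in_chunks(size_t bytes, size_t chunk) {
      // Over-reserve by one chunk so the start can be rounded up afterwards.
      char* raw = (char*) VirtualAlloc(NULL, bytes + chunk, MEM_RESERVE, PAGE_READWRITE);
      if (raw == NULL) return NULL;
      VirtualFree(raw, 0, MEM_RELEASE);                    // give the placeholder back
      char* p = (char*) (((size_t) raw + chunk - 1) & ~(chunk - 1));  // align the start
      char* next = p;
      for (size_t done = 0; done < bytes; done += chunk) {
        size_t rq = (bytes - done < chunk) ? (bytes - done) : chunk;
        if (VirtualAlloc(next, rq, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
                         PAGE_READWRITE) == NULL) {
          // Each chunk is its own allocation, so roll back the finished ones one by one.
          for (char* q = p; q < next; q += chunk) {
            VirtualFree(q, 0, MEM_RELEASE);
          }
          return NULL;
        }
        next += rq;
      }
      return p;
    }

The real allocate_pages_individually additionally records the result with NMT as a single block (lines 2933-2939), injects failures in debug builds, and routes each chunk through os::Kernel32Dll::VirtualAllocExNuma when UseNUMAInterleaving is enabled.
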
2944 
2945 
2946 
2947 void os::large_page_init() {
2948   if (!UseLargePages) return;
2949 
2950   // print a warning if any large-page-related flag is specified on the command line
2951   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
2952                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
2953   bool success = false;
2954 
2955 # define WARN(msg) if (warn_on_failure) { warning(msg); }
2956   if (resolve_functions_for_large_page_init()) {
2957     if (request_lock_memory_privilege()) {
2958       size_t s = os::Kernel32Dll::GetLargePageMinimum();
2959       if (s) {


3108     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3109     if (p_buf == NULL) {
3110       // give an appropriate warning message
3111       if (UseNUMAInterleaving) {
3112         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3113       }
3114       if (UseLargePagesIndividualAllocation) {
3115         warning("Individually allocated large pages failed, "
3116                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3117       }
3118       return NULL;
3119     }
3120 
3121     return p_buf;
3122 
3123   } else {
3124     // normal policy: just allocate it all at once
3125     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3126     char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
3127     if (res != NULL) {
3128       address pc = CALLER_PC;
3129       MemTracker::record_virtual_memory_reserve((address)res, bytes, pc);
3130       MemTracker::record_virtual_memory_commit((address)res, bytes, pc);
3131     }
3132 
3133     return res;
3134   }
3135 }
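
Outside of HotSpot, the "normal policy" branch (lines 3124-3133) boils down to a single VirtualAlloc call with MEM_LARGE_PAGES, which only succeeds if large pages are supported and the process holds the lock-memory privilege that large_page_init() acquires through request_lock_memory_privilege(). A minimal standalone sketch, with a made-up function name and without the MemTracker/NMT bookkeeping:

    #include <windows.h>
    #include <cstdio>

    // Illustrative sketch only (not HotSpot code): allocate `bytes` in one shot
    // backed by large pages. Assumes the process already holds the
    // SeLockMemoryPrivilege needed for MEM_LARGE_PAGES.
    static void* large_page_alloc_all_at_once(size_t bytes) {
      size_t lp = GetLargePageMinimum();
      if (lp == 0) return NULL;                        // large pages not supported
      size_t rounded = (bytes + lp - 1) & ~(lp - 1);   // size must be a large-page multiple
      void* p = VirtualAlloc(NULL, rounded,
                             MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
                             PAGE_READWRITE);
      if (p == NULL) {
        fprintf(stderr, "large-page VirtualAlloc failed, GetLastError=%lu\n",
                (unsigned long) GetLastError());
      }
      return p;
    }

The sketch calls GetLargePageMinimum directly; HotSpot looks it up dynamically through os::Kernel32Dll (line 2958) and, unlike the sketch, records the successful allocation with NMT before returning.
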
3136 
3137 bool os::release_memory_special(char* base, size_t bytes) {
3138   assert(base != NULL, "Sanity check");
3139   // Memory allocated via reserve_memory_special() is committed
3140   MemTracker::record_virtual_memory_uncommit((address)base, bytes);
3141   return release_memory(base, bytes);
3142 }
3143 
3144 void os::print_statistics() {
3145 }
3146 
3147 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3148   if (bytes == 0) {
3149     // Don't bother the OS with noops.
3150     return true;
3151   }
3152   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3153   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3154   // Don't attempt to print anything if the OS call fails. We're
3155   // probably low on resources, so the print itself may cause crashes.
3156 
3157   // Unless we have NUMAInterleaving enabled, the range of a commit
3158   // is always within a reserve covered by a single VirtualAlloc;
3159   // in that case we can just do a single commit for the requested size.
3160   if (!UseNUMAInterleaving) {




2830   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2831   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2832 
2833   // First reserve enough address space in advance, since we want to be
2834   // able to break a single contiguous virtual address range into multiple
2835   // large page commits, but WS2003 does not allow reserving large page space.
2836   // So we just use 4K pages for the reserve; this gives us a legal contiguous
2837   // address space. Then we will deallocate that reservation and re-allocate
2838   // using large pages.
2839   const size_t size_of_reserve = bytes + chunk_size;
2840   if (bytes > size_of_reserve) {
2841     // Overflowed.
2842     return NULL;
2843   }
2844   p_buf = (char *) VirtualAlloc(addr,
2845                                 size_of_reserve,  // size of Reserve
2846                                 MEM_RESERVE,
2847                                 PAGE_READWRITE);
2848   // If reservation failed, return NULL
2849   if (p_buf == NULL) return NULL;
2850   NMTTrackOp op(NMTTrackOp::ReserveOp);
2851   op.execute_op((address)p_buf, size_of_reserve, 0, CALLER_PC);
2852 
2853   os::release_memory(p_buf, bytes + chunk_size);
2854 
2855   // we still need to round up to a page boundary (in case we are using large pages)
2856   // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
2857   // instead we handle this in the bytes_to_rq computation below
2858   p_buf = (char *) align_size_up((size_t)p_buf, page_size);
2859 
2860   // now go through and allocate one chunk at a time until all bytes are
2861   // allocated
2862   size_t  bytes_remaining = bytes;
2863   // An overflow of align_size_up() would have been caught above
2864   // in the calculation of size_of_reserve.
2865   char * next_alloc_addr = p_buf;
2866   HANDLE hProc = GetCurrentProcess();
2867 
2868 #ifdef ASSERT
2869   // Variable for the failure injection
2870   long ran_num = os::random();
2871   size_t fail_after = ran_num % bytes;
2872 #endif


2897         // get the next node to use from the used_node_list
2898         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
2899         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
2900         p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc,
2901                                                             next_alloc_addr,
2902                                                             bytes_to_rq,
2903                                                             flags,
2904                                                             prot,
2905                                                             node);
2906       }
2907     }
2908 
2909     if (p_new == NULL) {
2910       // Free any allocated pages
2911       if (next_alloc_addr > p_buf) {
2912         // Some memory was committed so release it.
2913         size_t bytes_to_release = bytes - bytes_remaining;
2914         // NMT has yet to record any individual blocks, so it
2915       // needs to create a dummy 'reserve' record to match
2916         // the release.
2917         NMTTrackOp res_op(NMTTrackOp::ReserveOp);
2918         res_op.execute_op((address)p_buf, bytes_to_release, 0, CALLER_PC);
2919 
2920         os::release_memory(p_buf, bytes_to_release);
2921       }
2922 #ifdef ASSERT
2923       if (should_inject_error) {
2924         if (TracePageSizes && Verbose) {
2925           tty->print_cr("Reserving pages individually failed.");
2926         }
2927       }
2928 #endif
2929       return NULL;
2930     }
2931 
2932     bytes_remaining -= bytes_to_rq;
2933     next_alloc_addr += bytes_to_rq;
2934     count++;
2935   }
2936   // Although the memory is allocated individually, it is returned as one.
2937   // NMT records it as one block.
2938   NMTTrackOp ind_op(((flags & MEM_COMMIT) != 0) ?
2939      NMTTrackOp::ReserveAndCommitOp : NMTTrackOp::ReserveOp);
2940   ind_op.execute_op((address)p_buf, bytes, 0, CALLER_PC);


2941 
2942   // made it this far, success
2943   return p_buf;
2944 }
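
As the two listings on this page show, the visible differences between the versions of this file are confined to how allocations are reported to Native Memory Tracking: one version calls MemTracker::record_virtual_memory_reserve / record_virtual_memory_commit directly, the other funnels the same events through an NMTTrackOp whose kind is chosen up front and then executed. For the individually allocated case the two idioms, copied from the listings, line up as follows:

    // Reserve and (optionally) commit reported as separate calls (lines 2935-2938):
    address pc = CALLER_PC;
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, pc);
    if ((flags & MEM_COMMIT) != 0) {
      MemTracker::record_virtual_memory_commit((address)p_buf, bytes, pc);
    }

    // The same bookkeeping expressed as a single tracking operation (lines 2938-2940):
    NMTTrackOp ind_op(((flags & MEM_COMMIT) != 0) ?
       NMTTrackOp::ReserveAndCommitOp : NMTTrackOp::ReserveOp);
    ind_op.execute_op((address)p_buf, bytes, 0, CALLER_PC);

The same substitution appears at the initial reservation (line 2850), on the rollback path (lines 2915-2918), and in reserve_memory_special (around line 3129); one listing's release_memory_special also records an explicit uncommit (line 3140) that the other omits.
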
2945 
2946 
2947 
2948 void os::large_page_init() {
2949   if (!UseLargePages) return;
2950 
2951   // print a warning if any large-page-related flag is specified on the command line
2952   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
2953                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
2954   bool success = false;
2955 
2956 # define WARN(msg) if (warn_on_failure) { warning(msg); }
2957   if (resolve_functions_for_large_page_init()) {
2958     if (request_lock_memory_privilege()) {
2959       size_t s = os::Kernel32Dll::GetLargePageMinimum();
2960       if (s) {


3109     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3110     if (p_buf == NULL) {
3111       // give an appropriate warning message
3112       if (UseNUMAInterleaving) {
3113         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3114       }
3115       if (UseLargePagesIndividualAllocation) {
3116         warning("Individually allocated large pages failed, "
3117                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3118       }
3119       return NULL;
3120     }
3121 
3122     return p_buf;
3123 
3124   } else {
3125     // normal policy: just allocate it all at once
3126     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3127     char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
3128     if (res != NULL) {
3129       NMTTrackOp op(NMTTrackOp::ReserveAndCommitOp);
3130       op.execute_op((address)res, bytes, 0, CALLER_PC);

3131     }
3132 
3133     return res;
3134   }
3135 }
3136 
3137 bool os::release_memory_special(char* base, size_t bytes) {
3138   assert(base != NULL, "Sanity check");


3139   return release_memory(base, bytes);
3140 }
3141 
3142 void os::print_statistics() {
3143 }
3144 
3145 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3146   if (bytes == 0) {
3147     // Don't bother the OS with noops.
3148     return true;
3149   }
3150   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3151   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3152   // Don't attempt to print anything if the OS call fails. We're
3153   // probably low on resources, so the print itself may cause crashes.
3154 
3155   // Unless we have NUMAInterleaving enabled, the range of a commit
3156   // is always within a reserve covered by a single VirtualAlloc;
3157   // in that case we can just do a single commit for the requested size.
3158   if (!UseNUMAInterleaving) {

