
src/os/windows/vm/os_windows.cpp

--- old/src/os/windows/vm/os_windows.cpp

3096 #endif
3097     } else {
3098       WARN("Large page is not supported by the processor.");
3099     }
3100   } else {
3101     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3102   }
3103 #undef WARN
3104 
3105   const size_t default_page_size = (size_t) vm_page_size();
3106   if (success && _large_page_size > default_page_size) {
3107     _page_sizes[0] = _large_page_size;
3108     _page_sizes[1] = default_page_size;
3109     _page_sizes[2] = 0;
3110   }
3111 
3112   cleanup_after_large_page_init();
3113   UseLargePages = success;
3114 }
3115 
3116 // On win32, one cannot release just a part of reserved memory, it's an
3117 // all or nothing deal.  When we split a reservation, we must break the
3118 // reservation into two reservations.
3119 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3120                                   bool realloc) {
3121   if (size > 0) {
3122     release_memory(base, size);
3123     if (realloc) {
3124       reserve_memory(split, base);
3125     }
3126     if (size != split) {
3127       reserve_memory(size - split, base + split);
3128     }
3129   }
3130 }
3131 
3132 // Multiple threads can race in this code, but it is not possible to unmap small sections
3133 // of virtual space to get the requested alignment, as POSIX-like OSes can.
3134 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3135 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
3136   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3137          "Alignment must be a multiple of allocation granularity (page size)");
3138   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3139 
3140   size_t extra_size = size + alignment;
3141   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3142 
3143   char* aligned_base = NULL;
3144 
3145   do {
3146     char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
3147     if (extra_base == NULL) {
3148       return NULL;
3149     }
3150     // Do manual alignment
3151     aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
3152 
3153     os::release_memory(extra_base, extra_size);
3154 
3155     aligned_base = os::reserve_memory(size, aligned_base);
3156 
3157   } while (aligned_base == NULL);
3158 
3159   return aligned_base;
3160 }
3161 
3162 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3163   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3164          "reserve alignment");
3165   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3166   char* res;
3167   // Note that if UseLargePages is on, all the areas that require interleaving
3168   // will go through reserve_memory_special rather than through here.
3169   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3170   if (!use_individual) {
3171     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3172   } else {
3173     elapsedTimer reserveTimer;
3174     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3175     // in numa interleaving, we have to allocate pages individually
3176     // (well really chunks of NUMAInterleaveGranularity size)
3177     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3178     if (res == NULL) {
3179       warning("NUMA page allocation failed");
3180     }
3181     if (Verbose && PrintMiscellaneous) {
3182       reserveTimer.stop();
3183       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3184                     reserveTimer.milliseconds(), reserveTimer.ticks());
3185     }
3186   }
3187   assert(res == NULL || addr == NULL || addr == res,
3188          "Unexpected address from reserve.");
3189 
3190   return res;
3191 }
3192 
3193 // Reserve memory at an arbitrary address, only if that area is
3194 // available (and not reserved for something else).
3195 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3196   // Windows os::reserve_memory() fails if the requested address range is
3197   // not available.
3198   return reserve_memory(bytes, requested_addr);
3199 }
3200 
3201 size_t os::large_page_size() {
3202   return _large_page_size;
3203 }
3204 
3205 bool os::can_commit_large_page_memory() {
3206   // Windows only uses large page memory when the entire region is reserved
3207   // and committed in a single VirtualAlloc() call. This may change in the
3208   // future, but with Windows 2003 it's not possible to commit on demand.
3209   return false;
3210 }
3211 
3212 bool os::can_execute_large_page_memory() {
3213   return true;
3214 }
3215 
3216 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3217                                  bool exec) {
3218   assert(UseLargePages, "only for large pages");


4890     DWORD old_protect;
4891     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4892     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4893 
4894     if (!res) {
4895       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4896       // Don't consider this a hard error; on IA32, even if the
4897       // VirtualProtect fails, we should still be able to execute.
4898       CloseHandle(hFile);
4899       return base;
4900     }
4901   }
4902 
4903   if (CloseHandle(hFile) == 0) {
4904     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4905     return base;
4906   }
4907 
4908   return base;
4909 }
4910 
4911 
4912 // Remap a block of memory.
4913 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4914                           char *addr, size_t bytes, bool read_only,
4915                           bool allow_exec) {
4916   // This OS does not allow existing memory maps to be remapped so we
4917   // have to unmap the memory before we remap it.
4918   if (!os::unmap_memory(addr, bytes)) {
4919     return NULL;
4920   }
4921 
4922   // There is a very small theoretical window between the unmap_memory()
4923   // call above and the map_memory() call below where a thread in native
4924   // code may be able to access an address that is no longer mapped.
4925 
4926   return os::map_memory(fd, file_name, file_offset, addr, bytes,
4927                         read_only, allow_exec);
4928 }
4929 
4930 

+++ new/src/os/windows/vm/os_windows.cpp

3096 #endif
3097     } else {
3098       WARN("Large page is not supported by the processor.");
3099     }
3100   } else {
3101     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3102   }
3103 #undef WARN
3104 
3105   const size_t default_page_size = (size_t) vm_page_size();
3106   if (success && _large_page_size > default_page_size) {
3107     _page_sizes[0] = _large_page_size;
3108     _page_sizes[1] = default_page_size;
3109     _page_sizes[2] = 0;
3110   }
3111 
3112   cleanup_after_large_page_init();
3113   UseLargePages = success;
3114 }
3115 
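For concreteness, on a typical x64 machine with 2 MiB large pages, a successful run of the function above leaves _page_sizes as a zero-terminated preference list. The values below are illustrative, not from the patch:

    // Illustrative contents of _page_sizes on a system with 2 MiB large
    // pages and 4 KiB default pages (values assumed for the example):
    size_t page_sizes[3];
    page_sizes[0] = 2 * 1024 * 1024;  // preferred: the large page size
    page_sizes[1] = 4 * 1024;         // fall-back: the default page size
    page_sizes[2] = 0;                // zero terminates the list
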
3116 // Helper function to create a temp file in the given directory
3117 int os::create_file_for_heap(const char* dir, size_t size) {
3118 
3119   const char name_template[] = "/jvmheap.XXXXXX";
3120 
3121   char *fullname = (char*)_alloca(strlen(dir) + sizeof(name_template));
3122   (void)strcpy(fullname, dir);
3123   (void)strcat(fullname, name_template);
3124   os::native_path(fullname);
3125 
3126   char *path = _mktemp(fullname);
3127   if (path == NULL) {
3128     return -1;
3129   }
3130 
3131   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3132 
3133   if (fd < 0) {
3134     warning("Failure to create file %s for heap", path);
3135     return -1;
3136   }
3137 
3138   return fd;
3139 }
3140 
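For reviewers less familiar with the CRT idiom above, a standalone Win32 sketch of the same _mktemp + _O_TEMPORARY pattern (illustrative only, not part of the patch; the file deletes itself when its last descriptor is closed):

    #include <windows.h>
    #include <io.h>
    #include <fcntl.h>
    #include <sys/stat.h>
    #include <stdio.h>

    // Create a self-deleting temp file in 'dir', as create_file_for_heap does.
    int make_temp_heap_file(const char* dir) {
      char fullname[MAX_PATH];
      _snprintf(fullname, sizeof(fullname), "%s\\jvmheap.XXXXXX", dir);
      if (_mktemp(fullname) == NULL) return -1;  // bad dir or names exhausted
      // _O_TEMPORARY: delete on close; _O_EXCL: fail if the name was taken
      // between _mktemp and _open.
      return _open(fullname, _O_RDWR | _O_CREAT | _O_TEMPORARY | _O_EXCL,
                   _S_IREAD | _S_IWRITE);
    }
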
3141 // If 'base' is not NULL, the function will return NULL if it cannot get 'base'.
3142 //
3143 char* os::map_memory_to_dax_file(char* base, size_t size, int fd) {
3144   assert(fd != -1, "File descriptor is not valid");
3145 
3146   HANDLE fh = (HANDLE)_get_osfhandle(fd);
3147   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3148                                          (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3149   if (fileMapping == NULL) {
3150     if (GetLastError() == ERROR_DISK_FULL) {
3151       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for heap"));
3152     } else {
3153       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3154     }
3155 
3156     return NULL;
3157   }
3158 
3159   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3160 
3161   CloseHandle(fileMapping);
3162 
3163   return (char*)addr;
3164 }
3165 
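The high/low DWORD split passed to CreateFileMapping() above is easy to misread, so a standalone worked example follows (the 5 GiB value is arbitrary). CreateFileMapping() also extends the backing file to the given maximum size, which is why create_file_for_heap() never sizes the file itself:

    #include <windows.h>
    #include <assert.h>

    int main() {
      unsigned long long size = 5ULL * 1024 * 1024 * 1024;  // 5 GiB = 0x140000000
      DWORD high = (DWORD)(size >> 32);          // dwMaximumSizeHigh
      DWORD low  = (DWORD)(size & 0xFFFFFFFF);   // dwMaximumSizeLow
      assert(high == 0x1);
      assert(low  == 0x40000000);
      return 0;
    }
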
3166 char* os::replace_existing_mapping_with_dax_file_mapping(char* base, size_t size, int fd) {
3167   assert(fd != -1, "File descriptor is not valid");
3168   assert(base != NULL, "base cannot be NULL");
3169 
3170   release_memory(base, size);
3171   return map_memory_to_dax_file(base, size, fd);
3172 
3173 }
3174 
3175 // On win32, one cannot release just a part of reserved memory, it's an
3176 // all or nothing deal.  When we split a reservation, we must break the
3177 // reservation into two reservations.
3178 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3179                                   bool realloc) {
3180   if (size > 0) {
3181     release_memory(base, size);
3182     if (realloc) {
3183       reserve_memory(split, base);
3184     }
3185     if (size != split) {
3186       reserve_memory(size - split, base + split);
3187     }
3188   }
3189 }
3190 
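A standalone demonstration of the constraint the comment above describes: MEM_RELEASE only works on a whole reservation from its original base, so a split has to release everything and re-reserve the pieces, accepting a small race window (illustrative only):

    #include <windows.h>
    #include <stdio.h>

    int main() {
      const SIZE_T GRAN = 64 * 1024;  // typical allocation granularity
      char* base = (char*)VirtualAlloc(NULL, 2 * GRAN, MEM_RESERVE, PAGE_NOACCESS);
      if (base == NULL) return 1;

      // Partial release is refused: MEM_RELEASE requires the original base
      // address and a size of zero.
      BOOL partial = VirtualFree(base + GRAN, GRAN, MEM_RELEASE);
      printf("partial release succeeded? %d (expect 0)\n", partial);

      // The split dance: release the whole thing, then re-reserve both halves.
      VirtualFree(base, 0, MEM_RELEASE);
      char* lo = (char*)VirtualAlloc(base,        GRAN, MEM_RESERVE, PAGE_NOACCESS);
      char* hi = (char*)VirtualAlloc(base + GRAN, GRAN, MEM_RESERVE, PAGE_NOACCESS);
      printf("lo=%p hi=%p (either may be NULL if another thread won the race)\n", lo, hi);
      return 0;
    }
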
3191 char* os::attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc) {
3192   char* result = NULL;
3193   if (file_desc != -1) {
3194     result = map_memory_to_dax_file(addr, bytes, file_desc);
3195     if (result != NULL) {
3196       MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
3197     }
3198   } else {
3199     result = pd_attempt_reserve_memory_at(bytes, addr);
3200     if (result != NULL) {
3201       MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
3202     }
3203   }
3204   return result;
3205 }
3206 
3207 // Multiple threads can race in this code, but it is not possible to unmap small sections
3208 // of virtual space to get the requested alignment, as POSIX-like OSes can.
3209 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3210 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3211   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3212          "Alignment must be a multiple of allocation granularity (page size)");
3213   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3214 
3215   size_t extra_size = size + alignment;
3216   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3217 
3218   char* aligned_base = NULL;
3219 
3220   do {
3221     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3222     if (extra_base == NULL) {
3223       return NULL;
3224     }
3225     // Do manual alignment
3226     aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
3227 
3228     if (file_desc != -1) {
3229       os::unmap_memory(extra_base, extra_size);
3230     }
3231     else {
3232       os::release_memory(extra_base, extra_size);
3233     }
3234 
3235     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3236 
3237   } while (aligned_base == NULL);
3238 
3239   return aligned_base;
3240 }
3241 
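The loop above is the standard Windows workaround for alignments stronger than the allocation granularity. A standalone sketch of the same technique for the anonymous case, assuming a power-of-two alignment as the asserts require (the file_desc path differs only in using unmap_memory instead of release_memory):

    #include <windows.h>
    #include <stdint.h>

    // Over-reserve, compute an aligned base inside the range, release the
    // whole reservation (Windows cannot trim it), then re-reserve exactly
    // the aligned range; retry if another thread grabbed it in between.
    static char* reserve_aligned(size_t size, size_t alignment) {
      for (;;) {
        char* extra = (char*)VirtualAlloc(NULL, size + alignment,
                                          MEM_RESERVE, PAGE_NOACCESS);
        if (extra == NULL) return NULL;
        char* aligned = (char*)(((uintptr_t)extra + alignment - 1)
                                & ~((uintptr_t)alignment - 1));
        VirtualFree(extra, 0, MEM_RELEASE);
        char* res = (char*)VirtualAlloc(aligned, size, MEM_RESERVE, PAGE_NOACCESS);
        if (res != NULL) {
          return res;
        }
        // Another thread took the range between release and re-reserve: retry.
      }
    }
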
3242 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3243   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3244          "reserve alignment");
3245   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3246   char* res;
3247   // Note that if UseLargePages is on, all the areas that require interleaving
3248   // will go through reserve_memory_special rather than through here.
3249   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3250   if (!use_individual) {
3251     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3252   } else {
3253     elapsedTimer reserveTimer;
3254     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3255     // in numa interleaving, we have to allocate pages individually
3256     // (well really chunks of NUMAInterleaveGranularity size)
3257     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3258     if (res == NULL) {
3259       warning("NUMA page allocation failed");
3260     }
3261     if (Verbose && PrintMiscellaneous) {
3262       reserveTimer.stop();
3263       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3264                     reserveTimer.milliseconds(), reserveTimer.ticks());
3265     }
3266   }
3267   assert(res == NULL || addr == NULL || addr == res,
3268          "Unexpected address from reserve.");
3269 
3270   return res;
3271 }
3272 
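For contrast with POSIX mmap(), a standalone reminder of the two-step Windows model that pd_reserve_memory() participates in: MEM_RESERVE claims address space only, and pages must be committed before first touch (illustrative only):

    #include <windows.h>

    int main() {
      // Reserve 1 MiB of address space: nothing is backed yet, and touching
      // any of it would fault.
      char* p = (char*)VirtualAlloc(NULL, 1 << 20, MEM_RESERVE, PAGE_NOACCESS);
      if (p == NULL) return 1;
      // Commit one page inside the reservation before using it.
      if (VirtualAlloc(p, 4096, MEM_COMMIT, PAGE_READWRITE) == NULL) return 1;
      p[0] = 42;                       // legal now
      VirtualFree(p, 0, MEM_RELEASE);  // releases the entire reservation
      return 0;
    }
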
3273 // Reserve memory at an arbitrary address, only if that area is
3274 // available (and not reserved for something else).
3275 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3276   // Windows os::reserve_memory() fails if the requested address range is
3277   // not available.
3278   return reserve_memory(bytes, requested_addr, 0);
3279 }
3280 
3281 size_t os::large_page_size() {
3282   return _large_page_size;
3283 }
3284 
3285 bool os::can_commit_large_page_memory() {
3286   // Windows only uses large page memory when the entire region is reserved
3287   // and committed in a single VirtualAlloc() call. This may change in the
3288   // future, but with Windows 2003 it's not possible to commit on demand.
3289   return false;
3290 }
3291 
3292 bool os::can_execute_large_page_memory() {
3293   return true;
3294 }
3295 
3296 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3297                                  bool exec) {
3298   assert(UseLargePages, "only for large pages");


4970     DWORD old_protect;
4971     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4972     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4973 
4974     if (!res) {
4975       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4976       // Don't consider this a hard error; on IA32, even if the
4977       // VirtualProtect fails, we should still be able to execute.
4978       CloseHandle(hFile);
4979       return base;
4980     }
4981   }
4982 
4983   if (CloseHandle(hFile) == 0) {
4984     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4985     return base;
4986   }
4987 
4988   return base;
4989 }

4990 
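Only the tail of the enclosing mapping function appears in this hunk. For context, a standalone check of the VirtualProtect() contract it relies on: the old-protection out-parameter is mandatory even when unused, and failure is reported through GetLastError() (illustrative only):

    #include <windows.h>
    #include <stdio.h>

    int main() {
      char* base = (char*)VirtualAlloc(NULL, 4096, MEM_COMMIT, PAGE_READWRITE);
      if (base == NULL) return 1;
      DWORD old_protect;  // must point at valid storage or the call fails
      if (!VirtualProtect(base, 4096, PAGE_EXECUTE_READ, &old_protect)) {
        printf("VirtualProtect() failed: GetLastError->%lu\n", GetLastError());
      }
      VirtualFree(base, 0, MEM_RELEASE);
      return 0;
    }
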
4991 // Remap a block of memory.
4992 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4993                           char *addr, size_t bytes, bool read_only,
4994                           bool allow_exec) {
4995   // This OS does not allow existing memory maps to be remapped so we
4996   // have to unmap the memory before we remap it.
4997   if (!os::unmap_memory(addr, bytes)) {
4998     return NULL;
4999   }
5000 
5001   // There is a very small theoretical window between the unmap_memory()
5002   // call above and the map_memory() call below where a thread in native
5003   // code may be able to access an address that is no longer mapped.
5004 
5005   return os::map_memory(fd, file_name, file_offset, addr, bytes,
5006                         read_only, allow_exec);
5007 }
5008 
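The window the comment above describes is inherent to the unmap-then-map sequence. A condensed standalone sketch of the pattern (hFileMapping, addr, and bytes are assumed inputs; error handling elided):

    #include <windows.h>

    // Unmap, then remap the same file mapping at the same fixed address.
    // Between the two calls the range is unowned, so the second call
    // returns NULL if another thread's allocation lands there first.
    char* remap_fixed(HANDLE hFileMapping, char* addr, SIZE_T bytes) {
      UnmapViewOfFile(addr);
      return (char*)MapViewOfFileEx(hFileMapping, FILE_MAP_READ,
                                    0, 0, bytes, addr);
    }
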
5009 

