< prev index next >

src/os/windows/vm/os_windows.cpp

Print this page




3096 #endif
3097     } else {
3098       WARN("Large page is not supported by the processor.");
3099     }
3100   } else {
3101     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3102   }
3103 #undef WARN
3104 
3105   const size_t default_page_size = (size_t) vm_page_size();
3106   if (success && _large_page_size > default_page_size) {
3107     _page_sizes[0] = _large_page_size;
3108     _page_sizes[1] = default_page_size;
3109     _page_sizes[2] = 0;
3110   }
3111 
3112   cleanup_after_large_page_init();
3113   UseLargePages = success;
3114 }
3115 





























































3116 // On win32, one cannot release just a part of reserved memory, it's an
3117 // all or nothing deal.  When we split a reservation, we must break the
3118 // reservation into two reservations.
3119 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3120                                   bool realloc) {
3121   if (size > 0) {
3122     release_memory(base, size);
3123     if (realloc) {
3124       reserve_memory(split, base);
3125     }
3126     if (size != split) {
3127       reserve_memory(size - split, base + split);
3128     }
3129   }
3130 }
3131 
3132 // Multiple threads can race in this code but it's not possible to unmap small sections of
3133 // virtual space to get requested alignment, like posix-like os's.
3134 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
3135 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
3136   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3137          "Alignment must be a multiple of allocation granularity (page size)");
3138   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3139 
3140   size_t extra_size = size + alignment;
3141   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3142 
3143   char* aligned_base = NULL;
3144 
3145   do {
3146     char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
3147     if (extra_base == NULL) {
3148       return NULL;
3149     }
3150     // Do manual alignment
3151     aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
3152 




3153     os::release_memory(extra_base, extra_size);

3154 
3155     aligned_base = os::reserve_memory(size, aligned_base);
3156 
3157   } while (aligned_base == NULL);
3158 
3159   return aligned_base;
3160 }
3161 
// Reserve (but do not commit) 'bytes' of address space, optionally at the
// requested address 'addr'. Returns the reserved base address, or NULL on
// failure. With NUMA interleaving on (and large pages off) the space is
// built up chunk-by-chunk via allocate_pages_individually(); otherwise a
// single VirtualAlloc(MEM_RESERVE) call is used.
3162 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3163   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3164          "reserve alignment");
3165   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3166   char* res;
3167   // note that if UseLargePages is on, all the areas that require interleaving
3168   // will go thru reserve_memory_special rather than thru here.
3169   bool use_individual = (UseNUMAInterleaving && !UseLargePages);

3170   if (!use_individual) {



     // Plain reservation: one MEM_RESERVE call; pages are committed later.
3171     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);

3172   } else {
3173     elapsedTimer reserveTimer;
3174     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3175     // in numa interleaving, we have to allocate pages individually
3176     // (well really chunks of NUMAInterleaveGranularity size)
3177     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3178     if (res == NULL) {
3179       warning("NUMA page allocation failed");
3180     }
3181     if (Verbose && PrintMiscellaneous) {
3182       reserveTimer.stop();
3183       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3184                     reserveTimer.milliseconds(), reserveTimer.ticks());
3185     }
3186   }
   // If a specific address was requested we either got exactly it or NULL.
3187   assert(res == NULL || addr == NULL || addr == res,
3188          "Unexpected address from reserve.");
3189 
3190   return res;
3191 }
3192 
3193 // Reserve memory at an arbitrary address, only if that area is
3194 // available (and not reserved for something else).
3195 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3196   // Windows os::reserve_memory() fails if the requested address range is
3197   // not available.
3198   return reserve_memory(bytes, requested_addr);
3199 }
3200 
// Accessor for the cached large page size (_large_page_size), established
// during large page initialization.
3201 size_t os::large_page_size() {
3202   return _large_page_size;
3203 }
3204 
// Whether large-page memory can be committed incrementally after reserving.
3205 bool os::can_commit_large_page_memory() {
3206   // Windows only uses large page memory when the entire region is reserved
3207   // and committed in a single VirtualAlloc() call. This may change in the
3208   // future, but with Windows 2003 it's not possible to commit on demand.
3209   return false;
3210 }
3211 
// Whether large-page backed memory may hold executable code; always allowed
// on Windows.
3212 bool os::can_execute_large_page_memory() {
3213   return true;
3214 }
3215 
3216 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3217                                  bool exec) {
3218   assert(UseLargePages, "only for large pages");


4890     DWORD old_protect;
4891     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4892     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4893 
4894     if (!res) {
4895       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4896       // Don't consider this a hard error, on IA32 even if the
4897       // VirtualProtect fails, we should still be able to execute
4898       CloseHandle(hFile);
4899       return base;
4900     }
4901   }
4902 
4903   if (CloseHandle(hFile) == 0) {
4904     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4905     return base;
4906   }
4907 
4908   return base;
4909 }
4910 
4911 
4912 // Remap a block of memory.
// Re-establishes the mapping of [addr, addr + bytes) from the file described
// by (fd, file_name, file_offset). Returns the mapped address or NULL if the
// existing mapping could not be removed first.
4913 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4914                           char *addr, size_t bytes, bool read_only,
4915                           bool allow_exec) {
4916   // This OS does not allow existing memory maps to be remapped so we
4917   // have to unmap the memory before we remap it.
4918   if (!os::unmap_memory(addr, bytes)) {
4919     return NULL;
4920   }
4921 
4922   // There is a very small theoretical window between the unmap_memory()
4923   // call above and the map_memory() call below where a thread in native
4924   // code may be able to access an address that is no longer mapped.
4925 
4926   return os::map_memory(fd, file_name, file_offset, addr, bytes,
4927                         read_only, allow_exec);
4928 }
4929 
4930 




3096 #endif
3097     } else {
3098       WARN("Large page is not supported by the processor.");
3099     }
3100   } else {
3101     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3102   }
3103 #undef WARN
3104 
3105   const size_t default_page_size = (size_t) vm_page_size();
3106   if (success && _large_page_size > default_page_size) {
3107     _page_sizes[0] = _large_page_size;
3108     _page_sizes[1] = default_page_size;
3109     _page_sizes[2] = 0;
3110   }
3111 
3112   cleanup_after_large_page_init();
3113   UseLargePages = success;
3114 }
3115 
3116 // Helper function to create a temp file in the given directory
// Returns an open read/write file descriptor for an unnamed temp file under
// 'dir', or -1 on failure. The name is unlinked immediately so the file (and
// its disk space) disappears once the descriptor is closed.
// NOTE(review): the 'size' parameter is unused here — sizing appears to be
// done by the caller (see map_memory_to_file); confirm and consider removing.
3117 int os::create_file_for_heap(const char* dir, size_t size) {
3118 
3119   const char name_template[] = "/jvmheap.XXXXXX";
3120 
  // sizeof(name_template) includes the terminating NUL, so the buffer is
  // exactly large enough for dir + template.
3121   char *fullname = (char*)alloca(strlen(dir) + sizeof(name_template));
3122   (void)strcpy(fullname, dir);
3123   (void)strcat(fullname, name_template);
3124   os::native_path(fullname);
3125 
  // _mktemp fills in the XXXXXX portion with a unique name (in place).
3126   char *path = _mktemp(fullname);
3127   if (path == NULL) {
3128     return -1;
3129   }
3130 
  // O_EXCL makes the open fail rather than reuse a file another process
  // created between _mktemp and _open.
  // NOTE(review): there is no retry on that race — a single collision makes
  // heap creation fail outright; confirm this is acceptable.
3131   int fd = _open(path, O_RDWR | O_CREAT | O_EXCL, S_IWRITE | S_IREAD);
3132 
3133   if (fd < 0) {
3134     warning("Failure to create file %s for heap", path);
3135     return -1;
3136   }
3137 
3138   // Delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted
3139   _unlink(fullname);
3140 
3141   return fd;
3142 }
3143 
3144 // if 'base' is not NULL, function will return NULL if it cannot get 'base'
3145 //
// Maps 'size' bytes of the file behind descriptor 'fd' read/write into the
// process, optionally at address 'base'. The VM exits during initialization
// if the file mapping object cannot be created (e.g. disk full).
3146 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3147   assert(fd != -1, "File descriptor is not valid");
3148 
3149   HANDLE fh = (HANDLE)_get_osfhandle(fd);
  // The maximum-size arguments (high/low DWORDs of 'size') make the mapping
  // — and thereby the backing file — 'size' bytes long.
3150   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3151                                          (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3152   if (fileMapping == NULL) {
3153     if (GetLastError() == ERROR_DISK_FULL) {
3154       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for heap"));
3155     } else {
3156       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3157     }
3158 
  // Not reached in practice: vm_exit_during_initialization terminates the VM.
3159     return NULL;
3160   }
3161 
3162   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3163 
  // Closing the mapping handle here is safe: per the Win32 API an open view
  // keeps the file-mapping object alive until the view is unmapped.
3164   CloseHandle(fileMapping);
  // Fail if the mapping failed outright, or landed somewhere other than the
  // caller-requested 'base' (in which case the stray view is unmapped).
3165   if (addr == NULL || (base != NULL && addr != base)) {
3166     if (addr != NULL) {
3167       if(!pd_unmap_memory((char*)addr, size)) {
3168         warning("Could not release memory on unsuccessful file mapping");
3169       }
3170     }
3171     return NULL;
3172   }
3173 
3174   return (char*)addr;
3175 }
3176 
3177 // On win32, one cannot release just a part of reserved memory, it's an
3178 // all or nothing deal.  When we split a reservation, we must break the
3179 // reservation into two reservations.
// 'realloc' controls whether the leading [base, base + split) piece is
// re-reserved; the trailing piece is re-reserved whenever it is non-empty.
3180 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3181                                   bool realloc) {
3182   if (size > 0) {
    // Release the whole original reservation, then rebuild it as two.
3183     release_memory(base, size);
3184     if (realloc) {
3185       reserve_memory(split, base);
3186     }
3187     if (size != split) {
3188       reserve_memory(size - split, base + split);
3189     }
3190   }
3191 }
3192 
3193 // Multiple threads can race in this code but it's not possible to unmap small sections of
3194 // virtual space to get requested alignment, like posix-like os's.
3195 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
3196 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3197   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3198          "Alignment must be a multiple of allocation granularity (page size)");
3199   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3200 
3201   size_t extra_size = size + alignment;
3202   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3203 
3204   char* aligned_base = NULL;
3205 
3206   do {
3207     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3208     if (extra_base == NULL) {
3209       return NULL;
3210     }
3211     // Do manual alignment
3212     aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
3213 
3214     if (file_desc != -1) {
3215       os::unmap_memory(extra_base, extra_size);
3216     }
3217     else {
3218       os::release_memory(extra_base, extra_size);
3219     }
3220 
3221     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3222 
3223   } while (aligned_base == NULL);
3224 
3225   return aligned_base;
3226 }
3227 
// Reserve (but do not commit) 'bytes' of address space, optionally at the
// requested address 'addr'. When 'file_desc' is not -1 the space is backed
// by that file via map_memory_to_file(); with NUMA interleaving on (and
// large pages off) it is built chunk-by-chunk instead. Returns the reserved
// base address, or NULL on failure.
3228 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint, int file_desc) {
3229   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3230          "reserve alignment");
3231   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3232   char* res;
3233   // note that if UseLargePages is on, all the areas that require interleaving
3234   // will go thru reserve_memory_special rather than thru here.
3235   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  // File-backed reservations and NUMA chunked allocation are exclusive.
3236   assert(file_desc == -1 || !use_individual,  "NUMA allocation not supported when HeapDir is used");
3237   if (!use_individual) {
3238     if (file_desc != -1) {
3239       res = map_memory_to_file(addr, bytes, file_desc);
3240     } else {
3241       res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3242     }
3243   } else {
3244     elapsedTimer reserveTimer;
3245     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3246     // in numa interleaving, we have to allocate pages individually
3247     // (well really chunks of NUMAInterleaveGranularity size)
3248     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3249     if (res == NULL) {
3250       warning("NUMA page allocation failed");
3251     }
3252     if (Verbose && PrintMiscellaneous) {
3253       reserveTimer.stop();
3254       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3255                     reserveTimer.milliseconds(), reserveTimer.ticks());
3256     }
3257   }
  // If a specific address was requested we either got exactly it or NULL.
3258   assert(res == NULL || addr == NULL || addr == res,
3259          "Unexpected address from reserve.");
3260 
3261   return res;
3262 }
3263 
3264 // Reserve memory at an arbitrary address, only if that area is
3265 // available (and not reserved for something else).
3266 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
3267   // Windows os::reserve_memory() fails if the requested address range is
3268   // not available.
3269   return reserve_memory(bytes, requested_addr, 0, file_desc);
3270 }
3271 
// Accessor for the cached large page size (_large_page_size), established
// during large page initialization.
3272 size_t os::large_page_size() {
3273   return _large_page_size;
3274 }
3275 
// Whether large-page memory can be committed incrementally after reserving.
3276 bool os::can_commit_large_page_memory() {
3277   // Windows only uses large page memory when the entire region is reserved
3278   // and committed in a single VirtualAlloc() call. This may change in the
3279   // future, but with Windows 2003 it's not possible to commit on demand.
3280   return false;
3281 }
3282 
// Whether large-page backed memory may hold executable code; always allowed
// on Windows.
3283 bool os::can_execute_large_page_memory() {
3284   return true;
3285 }
3286 
3287 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3288                                  bool exec) {
3289   assert(UseLargePages, "only for large pages");


4961     DWORD old_protect;
4962     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4963     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4964 
4965     if (!res) {
4966       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4967       // Don't consider this a hard error, on IA32 even if the
4968       // VirtualProtect fails, we should still be able to execute
4969       CloseHandle(hFile);
4970       return base;
4971     }
4972   }
4973 
4974   if (CloseHandle(hFile) == 0) {
4975     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4976     return base;
4977   }
4978 
4979   return base;
4980 }

4981 
4982 // Remap a block of memory.
4983 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4984                           char *addr, size_t bytes, bool read_only,
4985                           bool allow_exec) {
4986   // This OS does not allow existing memory maps to be remapped so we
4987   // have to unmap the memory before we remap it.
4988   if (!os::unmap_memory(addr, bytes)) {
4989     return NULL;
4990   }
4991 
4992   // There is a very small theoretical window between the unmap_memory()
4993   // call above and the map_memory() call below where a thread in native
4994   // code may be able to access an address that is no longer mapped.
4995 
4996   return os::map_memory(fd, file_name, file_offset, addr, bytes,
4997                         read_only, allow_exec);
4998 }
4999 
5000 


< prev index next >