3086 #endif
3087 } else {
3088 WARN("Large page is not supported by the processor.");
3089 }
3090 } else {
3091 WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3092 }
3093 #undef WARN
3094
3095 const size_t default_page_size = (size_t) vm_page_size();
3096 if (success && _large_page_size > default_page_size) {
3097 _page_sizes[0] = _large_page_size;
3098 _page_sizes[1] = default_page_size;
3099 _page_sizes[2] = 0;
3100 }
3101
3102 cleanup_after_large_page_init();
3103 UseLargePages = success;
3104 }
3105
3106 // On win32, one cannot release just a part of reserved memory, it's an
3107 // all or nothing deal. When we split a reservation, we must break the
3108 // reservation into two reservations.
3109 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3110 bool realloc) {
3111 if (size > 0) {
3112 release_memory(base, size);
3113 if (realloc) {
3114 reserve_memory(split, base);
3115 }
3116 if (size != split) {
3117 reserve_memory(size - split, base + split);
3118 }
3119 }
3120 }
3121
// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  // Over-reserve so that an aligned start address is guaranteed to exist
  // somewhere inside the reservation.
  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);

    // Windows cannot release part of a reservation, so give the whole
    // over-sized reservation back ...
    os::release_memory(extra_base, extra_size);

    // ... and try to re-reserve exactly [aligned_base, aligned_base + size).
    // Another thread may grab that range in the window after the release;
    // if so this returns NULL and the loop retries from scratch.
    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}
3151
// Reserve (but do not commit) 'bytes' of virtual address space, at 'addr'
// if non-NULL. Returns the reserved base, or NULL on failure.
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_page_size() == 0, "reserve page size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    // Plain reservation: a single VirtualAlloc for the whole range.
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if (Verbose && PrintMiscellaneous) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if (Verbose && PrintMiscellaneous) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  // Either the reservation failed, no specific address was requested, or
  // we got exactly the address that was asked for.
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}
3182
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available, so no extra probing is needed here.
  return reserve_memory(bytes, requested_addr);
}
3190
// Accessor for the large page size determined during large-page init.
size_t os::large_page_size() {
  return _large_page_size;
}
3194
bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}
3201
// Capability query: large-page-backed memory may be used for executable
// code on this platform.
bool os::can_execute_large_page_memory() {
  return true;
}
3205
3206 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3207 bool exec) {
3208 assert(UseLargePages, "only for large pages");
4855 DWORD old_protect;
4856 DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4857 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4858
4859 if (!res) {
4860 log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4861 // Don't consider this a hard error, on IA32 even if the
4862 // VirtualProtect fails, we should still be able to execute
4863 CloseHandle(hFile);
4864 return base;
4865 }
4866 }
4867
4868 if (CloseHandle(hFile) == 0) {
4869 log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4870 return base;
4871 }
4872
4873 return base;
4874 }
4875
4876
// Remap a block of memory: re-establish the mapping of [addr, addr + bytes)
// from 'fd' at 'file_offset' with the given protections. Returns the mapped
// address or NULL on failure.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // This OS does not allow existing memory maps to be remapped so we
  // have to unmap the memory before we remap it.
  if (!os::unmap_memory(addr, bytes)) {
    // Could not remove the old mapping; fail rather than map over a live view.
    return NULL;
  }

  // There is a very small theoretical window between the unmap_memory()
  // call above and the map_memory() call below where a thread in native
  // code may be able to access an address that is no longer mapped.

  return os::map_memory(fd, file_name, file_offset, addr, bytes,
                        read_only, allow_exec);
}
4894
4895
|
3086 #endif
3087 } else {
3088 WARN("Large page is not supported by the processor.");
3089 }
3090 } else {
3091 WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3092 }
3093 #undef WARN
3094
3095 const size_t default_page_size = (size_t) vm_page_size();
3096 if (success && _large_page_size > default_page_size) {
3097 _page_sizes[0] = _large_page_size;
3098 _page_sizes[1] = default_page_size;
3099 _page_sizes[2] = 0;
3100 }
3101
3102 cleanup_after_large_page_init();
3103 UseLargePages = success;
3104 }
3105
// Helper function to create a temp file in the given directory.
// Returns an open read/write file descriptor, or -1 on failure.
// NOTE: 'size' is not used in this function; the mapping size is supplied
// later (see map_memory_to_file).
int os::create_file_for_heap(const char* dir, size_t size) {

  // _mktemp() replaces the trailing X's with a unique suffix.
  char name_template[] = "/jvmheap.XXXXXX";

  // Stack-allocate dir + "/jvmheap.XXXXXX" + NUL; sizeof(name_template)
  // already includes the terminator.
  char *fullname = (char*)alloca(strlen(dir) + sizeof(name_template));
  (void)strcpy(fullname, dir);
  (void)strcat(fullname, name_template);
  os::native_path(fullname);

  char *path = _mktemp(fullname);
  if (path == NULL) {
    return -1;
  }

  // O_EXCL guards against a race where another process creates the same
  // name between _mktemp() and _open().
  int fd = _open(path, O_RDWR | O_CREAT | O_EXCL, S_IWRITE | S_IREAD);

  if (fd < 0) {
    warning("Could not create file for heap");
    return -1;
  }

  // Delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted
  // NOTE(review): on Windows, _unlink() on a file that still has an open
  // handle normally fails (the return value is ignored here), so the name
  // may in fact persist until process exit — confirm the intended cleanup
  // actually occurs.
  _unlink(fullname);

  return fd;
}
3133
// Map 'size' bytes of the file behind descriptor 'fd' into memory with
// read/write access. If 'base' is not NULL, the function will return NULL
// if it cannot map exactly at 'base'. Exits the VM if the file mapping
// object itself cannot be created (e.g. disk full).
char* os::map_memory_to_file(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");

  // Translate the CRT descriptor into the underlying Win32 handle.
  HANDLE fh = (HANDLE)_get_osfhandle(fd);
  // The maximum mapping size is passed as high/low DWORD halves.
  HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
      (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
  if (fileMapping == NULL) {
    if (GetLastError() == ERROR_DISK_FULL) {
      vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for heap"));
    } else {
      vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
    }

    // Not reached (vm_exit_during_initialization does not return);
    // kept for form.
    return NULL;
  }

  // base == NULL lets the OS choose the address; otherwise the view must
  // land exactly at 'base'.
  LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);

  // The mapping-object handle is no longer needed once the view exists
  // (or creation has failed); an existing view keeps the mapping alive.
  CloseHandle(fileMapping);
  if (addr == NULL || (base != NULL && addr != base)) {
    if (addr != NULL) {
      // Got a view, but not at the requested base: undo it.
      if(!pd_unmap_memory((char*)addr, size)) {
        warning("Could not release memory on unsuccessful file mapping");
      }
    }
    return NULL;
  }

  return (char*)addr;
}
3166
// On win32, one cannot release just a part of reserved memory, it's an
// all or nothing deal. When we split a reservation, we must break the
// reservation into two reservations.
void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
                                  bool realloc) {
  if (size > 0) {
    // Drop the whole reservation first (partial release is impossible).
    release_memory(base, size);
    // Re-reserve the leading [base, base + split) piece when requested.
    if (realloc) {
      reserve_memory(split, base);
    }
    // Re-reserve the trailing [base + split, base + size) piece, if any.
    if (size != split) {
      reserve_memory(size - split, base + split);
    }
  }
}
3182
// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  // Over-reserve so that an aligned start address is guaranteed to exist
  // somewhere inside the reservation.
  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);

    // File-backed reservations are views of a file mapping and must be
    // unmapped; anonymous reservations are released instead.
    if (file_desc != -1) {
      os::unmap_memory(extra_base, extra_size);
    }
    else {
      os::release_memory(extra_base, extra_size);
    }

    // Re-reserve exactly [aligned_base, aligned_base + size). Another
    // thread may grab that range in the window after the release; if so
    // this returns NULL and the loop retries from scratch.
    aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);

  } while (aligned_base == NULL);

  return aligned_base;
}
3217
// Reserve 'bytes' of virtual address space at 'addr' (or anywhere if addr
// is NULL). If 'file_desc' is not -1 the reservation is backed by that
// heap file instead of anonymous memory. Returns the base or NULL.
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint, int file_desc) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_page_size() == 0, "reserve page size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  assert(file_desc == -1 || !use_individual, "NUMA allocation not supported when HeapDir is used");
  if (!use_individual) {
    if (file_desc != -1) {
      // File-backed: map a view of the heap file at 'addr' (or anywhere).
      res = map_memory_to_file(addr, bytes, file_desc);
    } else {
      // Plain reservation: a single VirtualAlloc for the whole range.
      res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
    }
  } else {
    elapsedTimer reserveTimer;
    if (Verbose && PrintMiscellaneous) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if (Verbose && PrintMiscellaneous) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  // Either the reservation failed, no specific address was requested, or
  // we got exactly the address that was asked for.
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}
3253
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available, so no extra probing is needed here.
  return reserve_memory(bytes, requested_addr, 0, file_desc);
}
3261
// Accessor for the large page size determined during large-page init.
size_t os::large_page_size() {
  return _large_page_size;
}
3265
bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}
3272
// Capability query: large-page-backed memory may be used for executable
// code on this platform.
bool os::can_execute_large_page_memory() {
  return true;
}
3276
3277 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3278 bool exec) {
3279 assert(UseLargePages, "only for large pages");
4926 DWORD old_protect;
4927 DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4928 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4929
4930 if (!res) {
4931 log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4932 // Don't consider this a hard error, on IA32 even if the
4933 // VirtualProtect fails, we should still be able to execute
4934 CloseHandle(hFile);
4935 return base;
4936 }
4937 }
4938
4939 if (CloseHandle(hFile) == 0) {
4940 log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4941 return base;
4942 }
4943
4944 return base;
4945 }
4946
// Remap a block of memory: re-establish the mapping of [addr, addr + bytes)
// from 'fd' at 'file_offset' with the given protections. Returns the mapped
// address or NULL on failure.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // This OS does not allow existing memory maps to be remapped so we
  // have to unmap the memory before we remap it.
  if (!os::unmap_memory(addr, bytes)) {
    // Could not remove the old mapping; fail rather than map over a live view.
    return NULL;
  }

  // There is a very small theoretical window between the unmap_memory()
  // call above and the map_memory() call below where a thread in native
  // code may be able to access an address that is no longer mapped.

  return os::map_memory(fd, file_name, file_offset, addr, bytes,
                        read_only, allow_exec);
}
4964
4965
|