< prev index next >

src/hotspot/os/windows/os_windows.cpp

Print this page




2865 #endif
2866     } else {
2867       WARN("Large page is not supported by the processor.");
2868     }
2869   } else {
2870     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
2871   }
2872 #undef WARN
2873 
2874   const size_t default_page_size = (size_t) vm_page_size();
2875   if (success && _large_page_size > default_page_size) {
2876     _page_sizes[0] = _large_page_size;
2877     _page_sizes[1] = default_page_size;
2878     _page_sizes[2] = 0;
2879   }
2880 
2881   cleanup_after_large_page_init();
2882   UseLargePages = success;
2883 }
2884 





































































2885 // On win32, one cannot release just a part of reserved memory, it's an
2886 // all or nothing deal.  When we split a reservation, we must break the
2887 // reservation into two reservations.
2888 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
2889                                   bool realloc) {
2890   if (size > 0) {
2891     release_memory(base, size);
2892     if (realloc) {
2893       reserve_memory(split, base);
2894     }
2895     if (size != split) {
2896       reserve_memory(size - split, base + split);
2897     }
2898   }
2899 }
2900 
// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  // Over-reserve by 'alignment' bytes so that an aligned sub-range of
  // 'size' bytes is guaranteed to exist inside the reservation.
  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = align_up(extra_base, alignment);

    // Windows cannot release a sub-range of a reservation (see
    // pd_split_reserved_memory), so release the whole over-sized
    // reservation and immediately try to re-reserve just the aligned
    // range. Another thread may grab the range in between ...
    os::release_memory(extra_base, extra_size);

    // ... in which case this reserve returns NULL and the loop retries.
    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}
2930 
2931 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
2932   assert((size_t)addr % os::vm_allocation_granularity() == 0,
2933          "reserve alignment");
2934   assert(bytes % os::vm_page_size() == 0, "reserve page size");
2935   char* res;
2936   // note that if UseLargePages is on, all the areas that require interleaving
2937   // will go thru reserve_memory_special rather than thru here.
2938   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
2939   if (!use_individual) {
2940     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
2941   } else {
2942     elapsedTimer reserveTimer;
2943     if (Verbose && PrintMiscellaneous) reserveTimer.start();
2944     // in numa interleaving, we have to allocate pages individually


2948       warning("NUMA page allocation failed");
2949     }
2950     if (Verbose && PrintMiscellaneous) {
2951       reserveTimer.stop();
2952       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
2953                     reserveTimer.milliseconds(), reserveTimer.ticks());
2954     }
2955   }
2956   assert(res == NULL || addr == NULL || addr == res,
2957          "Unexpected address from reserve.");
2958 
2959   return res;
2960 }
2961 
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available, so a plain reserve at the requested address suffices.
  return reserve_memory(bytes, requested_addr);
}
2969 
// Returns the large page size (in bytes) selected during large-page
// initialization.
size_t os::large_page_size() {
  return _large_page_size;
}
2973 
// Whether large-page memory can be committed piecemeal after reservation.
bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}
2980 
// Windows supports executable mappings of large-page memory.
bool os::can_execute_large_page_memory() {
  return true;
}
2984 
2985 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
2986                                  bool exec) {
2987   assert(UseLargePages, "only for large pages");




2865 #endif
2866     } else {
2867       WARN("Large page is not supported by the processor.");
2868     }
2869   } else {
2870     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
2871   }
2872 #undef WARN
2873 
2874   const size_t default_page_size = (size_t) vm_page_size();
2875   if (success && _large_page_size > default_page_size) {
2876     _page_sizes[0] = _large_page_size;
2877     _page_sizes[1] = default_page_size;
2878     _page_sizes[2] = 0;
2879   }
2880 
2881   cleanup_after_large_page_init();
2882   UseLargePages = success;
2883 }
2884 
2885 int os::create_file_for_heap(const char* dir) {
2886 
2887   const char name_template[] = "/jvmheap.XXXXXX";
2888   char *fullname = (char*)os::malloc((strlen(dir) + strlen(name_template) + 1), mtInternal);
2889   if (fullname == NULL) {
2890     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
2891     return -1;
2892   }
2893 
2894   (void)strncpy(fullname, dir, strlen(dir)+1);
2895   (void)strncat(fullname, name_template, strlen(name_template));
2896 
2897   os::native_path(fullname);
2898 
2899   char *path = _mktemp(fullname);
2900   if (path == NULL) {
2901     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
2902     os::free(fullname);
2903     return -1;
2904   }
2905 
2906   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
2907 
2908   os::free(fullname);
2909   if (fd < 0) {
2910     warning("Problem opening file for heap (%s)", os::strerror(errno));
2911     return -1;
2912   }
2913   return fd;
2914 }
2915 
// Maps 'size' bytes of the file 'fd' read/write into memory.
// If 'base' is not NULL, function will return NULL if it cannot get 'base'
char* os::map_memory_to_file(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");

  // Translate the CRT descriptor into the underlying Win32 file handle.
  HANDLE fh = (HANDLE)_get_osfhandle(fd);
#ifdef _LP64
  // On 64-bit the mapping size must be split across the high/low DWORD
  // arguments of CreateFileMapping.
  HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
    (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
#else
  HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
    0, (DWORD)size, NULL);
#endif
  if (fileMapping == NULL) {
    // Creating the mapping object also extends the file to 'size' bytes,
    // which is why a full disk surfaces here.
    if (GetLastError() == ERROR_DISK_FULL) {
      vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
    }
    else {
      vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
    }

    return NULL;
  }

  // Map a writable view of the whole file at 'base' (anywhere if base is
  // NULL). Returns NULL if the requested address range is unavailable.
  LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);

  // Closing the mapping handle does not invalidate the mapped view.
  CloseHandle(fileMapping);

  return (char*)addr;
}
2945 
// Replace the existing reservation at [base, base+size) with a file-backed
// mapping over the same range; returns NULL if the range cannot be remapped.
// NOTE(review): the range is briefly unowned between release_memory() and
// map_memory_to_file(), so another thread could claim it and this returns
// NULL — presumably callers handle that; verify at call sites.
char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");
  assert(base != NULL, "Base address cannot be NULL");

  release_memory(base, size);
  return map_memory_to_file(base, size, fd);
}
2953 
2954 // On win32, one cannot release just a part of reserved memory, it's an
2955 // all or nothing deal.  When we split a reservation, we must break the
2956 // reservation into two reservations.
2957 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
2958                                   bool realloc) {
2959   if (size > 0) {
2960     release_memory(base, size);
2961     if (realloc) {
2962       reserve_memory(split, base);
2963     }
2964     if (size != split) {
2965       reserve_memory(size - split, base + split);
2966     }
2967   }
2968 }
2969 
// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  // Over-reserve by 'alignment' bytes so that an aligned sub-range of
  // 'size' bytes is guaranteed to exist inside the reservation.
  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = align_up(extra_base, alignment);

    // Windows cannot give back part of a reservation, so release the whole
    // over-sized range and re-reserve just the aligned piece below.
    if (file_desc != -1) {
      // File-backed memory is a mapped view, so it must be unmapped rather
      // than released.
      os::unmap_memory(extra_base, extra_size);
    } else {
      os::release_memory(extra_base, extra_size);
    }

    // Another thread may take the range between the release above and this
    // reserve, in which case this returns NULL and the loop retries.
    aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);

  } while (aligned_base == NULL);

  return aligned_base;
}
3003 
3004 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3005   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3006          "reserve alignment");
3007   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3008   char* res;
3009   // note that if UseLargePages is on, all the areas that require interleaving
3010   // will go thru reserve_memory_special rather than thru here.
3011   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3012   if (!use_individual) {
3013     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3014   } else {
3015     elapsedTimer reserveTimer;
3016     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3017     // in numa interleaving, we have to allocate pages individually


3021       warning("NUMA page allocation failed");
3022     }
3023     if (Verbose && PrintMiscellaneous) {
3024       reserveTimer.stop();
3025       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3026                     reserveTimer.milliseconds(), reserveTimer.ticks());
3027     }
3028   }
3029   assert(res == NULL || addr == NULL || addr == res,
3030          "Unexpected address from reserve.");
3031 
3032   return res;
3033 }
3034 
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available, so a plain reserve at the requested address suffices.
  return reserve_memory(bytes, requested_addr);
}
3042 
// File-backed variant: attempt to map [requested_addr, requested_addr+bytes)
// onto the backing file 'file_desc' instead of reserving anonymous memory.
// Returns NULL if the requested address range cannot be obtained.
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
  assert(file_desc >= 0, "file_desc is not valid");
  return map_memory_to_file(requested_addr, bytes, file_desc);
}
3047 
// Returns the large page size (in bytes) selected during large-page
// initialization.
size_t os::large_page_size() {
  return _large_page_size;
}
3051 
// Whether large-page memory can be committed piecemeal after reservation.
bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}
3058 
// Windows supports executable mappings of large-page memory.
bool os::can_execute_large_page_memory() {
  return true;
}
3062 
3063 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3064                                  bool exec) {
3065   assert(UseLargePages, "only for large pages");


< prev index next >