src/hotspot/os/windows/os_windows.cpp

2746   }
2747 
2748   // Verify that the bitmap could be created with a normal page.
2749   // If this fails, the testing method above isn't reliable.
2750 #ifdef ASSERT
2751   void* verify_mem = ::malloc(4 * 1024);
2752   HBITMAP verify_bitmap = gdi_create_tiny_bitmap(verify_mem);
2753   if (verify_bitmap == NULL) {
2754     fatal("Couldn't create test bitmap with malloced memory");
2755   } else {
2756     DeleteObject(verify_bitmap);
2757   }
2758   ::free(verify_mem);
2759 #endif
2760 
2761   return false;
2762 }
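For context: gdi_create_tiny_bitmap and gdi_can_use_memory are defined earlier in this file and are elided from this hunk. A minimal sketch of the same probing idea follows, assuming an illustrative helper name (probe_gdi_with_memory) and an illustrative bitmap size; neither is the file's actual definition.

    #include <windows.h>

    // Sketch of the probe: ask GDI to build a tiny monochrome bitmap whose
    // pixel bits live in the memory under test, and treat a NULL handle as
    // "GDI cannot use this memory".
    static bool probe_gdi_with_memory(void* bits) {
      // One scan line of 1-bpp pixels: 16 bytes of bit data = 128 pixels wide,
      // 1 pixel high. (16 is an illustrative size; the code above works with
      // gdi_tiny_bitmap_width_bytes.)
      HBITMAP hbm = ::CreateBitmap(16 * 8, 1, 1, 1, bits);
      if (hbm == NULL) {
        return false;     // GDI could not create a bitmap backed by this memory
      }
      ::DeleteObject(hbm);
      return true;
    }

The essential point, visible in the hunk above, is that a NULL HBITMAP is taken to mean the memory cannot be handed to GDI, and the ASSERT block cross-checks the method itself against plain malloc'ed memory.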
2763 
2764 // Test if GDI functions work when memory spans
2765 // two adjacent memory reservations.
2766 static bool gdi_can_use_split_reservation_memory(bool use_large_pages, size_t granule) {
2767   DWORD mem_large_pages = use_large_pages ? MEM_LARGE_PAGES : 0;
2768 
2769   // Find virtual memory range. Two granules for regions and one for alignment.
2770   void* reserved = VirtualAlloc(NULL,
2771                                 granule * 3,
2772                                 MEM_RESERVE,
2773                                 PAGE_NOACCESS);
2774   if (reserved == NULL) {
2775     // Can't proceed with test - pessimistically report false
2776     return false;
2777   }
2778   VirtualFreeChecked(reserved, 0, MEM_RELEASE);
2779 
2780   // Ensure proper alignment
2781   void* res0 = align_up(reserved, granule);
2782   void* res1 = (char*)res0 + granule;
2783 
2784   // Reserve and commit the first part
2785   void* mem0 = VirtualAlloc(res0,
2786                             granule,
2787                             MEM_RESERVE|MEM_COMMIT|mem_large_pages,
2788                             PAGE_READWRITE);
2789   if (mem0 != res0) {
2790     // Can't proceed with test - pessimistically report false
2791     return false;
2792   }
2793 
2794   // Reserve and commit the second part
2795   void* mem1 = VirtualAlloc(res1,
2796                             granule,
2797                             MEM_RESERVE|MEM_COMMIT|mem_large_pages,
2798                             PAGE_READWRITE);
2799   if (mem1 != res1) {
2800     VirtualFreeChecked(mem0, 0, MEM_RELEASE);
2801     // Can't proceed with test - pessimistically report false
2802     return false;
2803   }
2804 
2805   // Set the bitmap's bits to point one "width" bytes before, so that
2806   // the bitmap extends across the reservation boundary.
2807   void* bitmapBits = (char*)mem1 - gdi_tiny_bitmap_width_bytes;
2808 
2809   bool success = gdi_can_use_memory(bitmapBits);
2810 
2811   VirtualFreeChecked(mem1, 0, MEM_RELEASE);
2812   VirtualFreeChecked(mem0, 0, MEM_RELEASE);
2813 
2814   return success;
2815 }
2816 
2817 // Container for NUMA node list info


2899 }
2900 
2901 static bool numa_interleaving_init() {
2902   bool success = false;
2903 
2904   // print a warning if UseNUMAInterleaving flag is specified on command line
2905   bool warn_on_failure = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2906 
2907 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2908 
2909   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2910   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2911   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2912 
2913   if (!numa_node_list_holder.build()) {
2914     WARN("Process does not cover multiple NUMA nodes.");
2915     WARN("...Ignoring UseNUMAInterleaving flag.");
2916     return false;
2917   }
2918 
2919   if (!gdi_can_use_split_reservation_memory(UseLargePages, min_interleave_granularity)) {
2920     WARN("Windows GDI cannot handle split reservations.");
2921     WARN("...Ignoring UseNUMAInterleaving flag.");
2922     return false;
2923   }
2924 
2925   if (log_is_enabled(Debug, os, cpu)) {
2926     Log(os, cpu) log;
2927     log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2928     for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2929       log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2930     }
2931   }
2932 
2933 #undef WARN
2934 
2935   return true;
2936 }
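A hedged usage note (the flag names are taken from the code above; the granularity value is only an example): enabling interleaving explicitly might look like

    java -XX:+UseNUMAInterleaving -XX:NUMAInterleaveGranularity=2m ...

Because UseNUMAInterleaving then no longer has its default value, warn_on_failure is true and the WARN messages above are printed if node-list building or the GDI split-reservation probe fails.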
2937 
2938 // this routine is used whenever we need to reserve a contiguous VA range
2939 // but we need to make separate VirtualAlloc calls for each piece of the range


3065     return 0;
3066   }
3067 
3068   size_t size = GetLargePageMinimum();
3069   if (size == 0) {
3070     WARN("Large page is not supported by the processor.");
3071     return 0;
3072   }
3073 
3074 #if defined(IA32) || defined(AMD64)
3075   if (size > 4*M || LargePageSizeInBytes > 4*M) {
3076     WARN("JVM cannot use large pages bigger than 4mb.");
3077     return 0;
3078   }
3079 #endif
3080 
3081   if (LargePageSizeInBytes > 0 && LargePageSizeInBytes % size == 0) {
3082     size = LargePageSizeInBytes;
3083   }
3084 
3085   // Now test allocating a page
3086   void* large_page = VirtualAlloc(NULL,
3087                                   size,
3088                                   MEM_RESERVE|MEM_COMMIT|MEM_LARGE_PAGES,
3089                                   PAGE_READWRITE);
3090   if (large_page == NULL) {
3091     WARN("JVM cannot allocate one single large page.");
3092     return 0;
3093   }
3094 
3095   // Detect if GDI can use memory backed by large pages
3096   if (!gdi_can_use_memory(large_page)) {
3097     WARN("JVM cannot use large pages because of bug in Windows GDI.");
3098     return 0;
3099   }
3100 
3101   // Release test page
3102   VirtualFreeChecked(large_page, 0, MEM_RELEASE);
3103 
3104 #undef WARN
3105 
3106   return size;
3107 }
3108 
3109 void os::large_page_init() {
3110   if (!UseLargePages) {
3111     return;
3112   }
3113 
3114   _large_page_size = large_page_init_decide_size();
3115 
3116   const size_t default_page_size = (size_t) vm_page_size();
3117   if (_large_page_size > default_page_size) {
3118     _page_sizes[0] = _large_page_size;
3119     _page_sizes[1] = default_page_size;
3120     _page_sizes[2] = 0;
3121   }
3122 
3123   UseLargePages = _large_page_size != 0;
3124 
3125   if (UseLargePages && UseLargePagesIndividualAllocation) {
3126     if (!gdi_can_use_split_reservation_memory(true /* use_large_pages */, _large_page_size)) {
3127       if (FLAG_IS_CMDLINE(UseLargePagesIndividualAllocation)) {
3128         warning("Windows GDI cannot handle split reservations.");
3129         warning("...Ignoring UseLargePagesIndividualAllocation flag.");
3130       }
3131       UseLargePagesIndividualAllocation = false;
3132     }
3133   }
3134 }
3135 
3136 int os::create_file_for_heap(const char* dir) {
3137 
3138   const char name_template[] = "/jvmheap.XXXXXX";
3139 
3140   size_t fullname_len = strlen(dir) + strlen(name_template);
3141   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3142   if (fullname == NULL) {
3143     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3144     return -1;
3145   }
3146   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3147   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3148 
3149   os::native_path(fullname);
3150 
3151   char *path = _mktemp(fullname);
3152   if (path == NULL) {
3153     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));




2746   }
2747 
2748   // Verify that the bitmap could be created with a normal page.
2749   // If this fails, the testing method above isn't reliable.
2750 #ifdef ASSERT
2751   void* verify_mem = ::malloc(4 * 1024);
2752   HBITMAP verify_bitmap = gdi_create_tiny_bitmap(verify_mem);
2753   if (verify_bitmap == NULL) {
2754     fatal("Couldn't create test bitmap with malloced memory");
2755   } else {
2756     DeleteObject(verify_bitmap);
2757   }
2758   ::free(verify_mem);
2759 #endif
2760 
2761   return false;
2762 }
2763 
2764 // Test if GDI functions work when memory spans
2765 // two adjacent memory reservations.
2766 static bool gdi_can_use_split_reservation_memory() {
2767   size_t granule = os::vm_allocation_granularity();
2768 
2769   // Find virtual memory range
2770   void* reserved = VirtualAlloc(NULL,
2771                                 granule * 2,
2772                                 MEM_RESERVE,
2773                                 PAGE_NOACCESS);
2774   if (reserved == NULL) {
2775     // Can't proceed with test - pessimistically report false
2776     return false;
2777   }
2778   VirtualFreeChecked(reserved, 0, MEM_RELEASE);
2779 
2780   void* res0 = reserved;
2781   void* res1 = (char*)reserved + granule;

2782 
2783   // Reserve and commit the first part
2784   void* mem0 = VirtualAlloc(res0,
2785                             granule,
2786                             MEM_RESERVE|MEM_COMMIT,
2787                             PAGE_READWRITE);
2788   if (mem0 != res0) {
2789     // Can't proceed with test - pessimistically report false
2790     return false;
2791   }
2792 
2793   // Reserve and commit the second part
2794   void* mem1 = VirtualAlloc(res1,
2795                             granule,
2796                             MEM_RESERVE|MEM_COMMIT,
2797                             PAGE_READWRITE);
2798   if (mem1 != res1) {
2799     VirtualFreeChecked(mem0, 0, MEM_RELEASE);
2800     // Can't proceed with test - pessimistically report false
2801     return false;
2802   }
2803 
2804   // Set the bitmap's bits to point one "width" bytes before, so that
2805   // the bitmap extends across the reservation boundary.
2806   void* bitmapBits = (char*)mem1 - gdi_tiny_bitmap_width_bytes;
2807 
2808   bool success = gdi_can_use_memory(bitmapBits);
2809 
2810   VirtualFreeChecked(mem1, 0, MEM_RELEASE);
2811   VirtualFreeChecked(mem0, 0, MEM_RELEASE);
2812 
2813   return success;
2814 }
2815 
2816 // Container for NUMA node list info


2898 }
2899 
2900 static bool numa_interleaving_init() {
2901   bool success = false;
2902 
2903   // print a warning if UseNUMAInterleaving flag is specified on command line
2904   bool warn_on_failure = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2905 
2906 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2907 
2908   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2909   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2910   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2911 
2912   if (!numa_node_list_holder.build()) {
2913     WARN("Process does not cover multiple NUMA nodes.");
2914     WARN("...Ignoring UseNUMAInterleaving flag.");
2915     return false;
2916   }
2917 
2918   if (!gdi_can_use_split_reservation_memory()) {
2919     WARN("Windows GDI cannot handle split reservations.");
2920     WARN("...Ignoring UseNUMAInterleaving flag.");
2921     return false;
2922   }
2923 
2924   if (log_is_enabled(Debug, os, cpu)) {
2925     Log(os, cpu) log;
2926     log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2927     for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2928       log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2929     }
2930   }
2931 
2932 #undef WARN
2933 
2934   return true;
2935 }
2936 
2937 // this routine is used whenever we need to reserve a contiguous VA range
2938 // but we need to make separate VirtualAlloc calls for each piece of the range


3064     return 0;
3065   }
3066 
3067   size_t size = GetLargePageMinimum();
3068   if (size == 0) {
3069     WARN("Large page is not supported by the processor.");
3070     return 0;
3071   }
3072 
3073 #if defined(IA32) || defined(AMD64)
3074   if (size > 4*M || LargePageSizeInBytes > 4*M) {
3075     WARN("JVM cannot use large pages bigger than 4mb.");
3076     return 0;
3077   }
3078 #endif
3079 
3080   if (LargePageSizeInBytes > 0 && LargePageSizeInBytes % size == 0) {
3081     size = LargePageSizeInBytes;
3082   }
3083 



















3084 #undef WARN
3085 
3086   return size;
3087 }
3088 
3089 void os::large_page_init() {
3090   if (!UseLargePages) {
3091     return;
3092   }
3093 
3094   _large_page_size = large_page_init_decide_size();
3095 
3096   const size_t default_page_size = (size_t) vm_page_size();
3097   if (_large_page_size > default_page_size) {
3098     _page_sizes[0] = _large_page_size;
3099     _page_sizes[1] = default_page_size;
3100     _page_sizes[2] = 0;
3101   }
3102 
3103   UseLargePages = _large_page_size != 0;










3104 }
3105 
3106 int os::create_file_for_heap(const char* dir) {
3107 
3108   const char name_template[] = "/jvmheap.XXXXXX";
3109 
3110   size_t fullname_len = strlen(dir) + strlen(name_template);
3111   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3112   if (fullname == NULL) {
3113     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3114     return -1;
3115   }
3116   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3117   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3118 
3119   os::native_path(fullname);
3120 
3121   char *path = _mktemp(fullname);
3122   if (path == NULL) {
3123     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));

