2707 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2708 if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
2709 // We don't check the return value: madvise(MADV_HUGEPAGE) may not
2710 // be supported or the memory may already be backed by huge pages.
2711 ::madvise(addr, bytes, MADV_HUGEPAGE);
2712 }
2713 }
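// A standalone sketch of the same best-effort transparent-huge-page advise
// (illustrative only, not part of this file; assumes Linux with MADV_HUGEPAGE
// available in <sys/mman.h>):
//
//   #include <sys/mman.h>
//   #include <stddef.h>
//
//   static void advise_huge_pages(char* addr, size_t bytes) {
//     // Best effort: THP may be compiled out of the kernel, disabled via
//     // /sys/kernel/mm/transparent_hugepage/enabled, or the range may
//     // already be backed by huge pages, so the result is ignored.
//     (void) ::madvise(addr, bytes, MADV_HUGEPAGE);
//   }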
2714
2715 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2716 // This method works by doing an mmap over an existing mapping and effectively discarding
2717 // the existing pages. However, it won't work for SHM-based large pages, which cannot be
2718 // uncommitted at all. We don't do anything in that case, to avoid creating a segment with
2719 // small pages on top of the SHM segment. This method always works for small pages, so we
2720 // allow it in any case.
2721 if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
2722 commit_memory(addr, bytes, alignment_hint, !ExecMem);
2723 }
2724 }
2725
2726 void os::numa_make_global(char *addr, size_t bytes) {
2727 Linux::numa_interleave_memory(addr, bytes);
2728 }
2729
2730 // Argument for numa_set_bind_policy(int): passing 0 sets the bind
2731 // policy to MPOL_PREFERRED for the current thread.
2732 #define USE_MPOL_PREFERRED 0
2733
2734 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2735 // To make NUMA and large pages more robust when both are enabled, we need to ease
2736 // the requirements on where the memory should be allocated. MPOL_BIND is the
2737 // default policy and it will force memory to be allocated on the specified
2738 // node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on
2739 // the specified node, but will not force it. Using this policy will prevent
2740 // getting SIGBUS when trying to allocate large pages on NUMA nodes with no
2741 // free large pages.
2742 Linux::numa_set_bind_policy(USE_MPOL_PREFERRED);
2743 Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
2744 }
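// The same two-call sequence written against libnuma directly (illustrative
// sketch only; assumes a program linked with -lnuma, whereas HotSpot resolves
// these entry points with dlsym so that libnuma stays an optional dependency):
//
//   #include <numa.h>
//
//   void make_local(void* addr, size_t bytes, int node) {
//     numa_set_bind_policy(0);  // 0 => MPOL_PREFERRED, 1 => strict MPOL_BIND
//     numa_tonode_memory(addr, bytes, node);
//   }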
2745
2746 bool os::numa_topology_changed() { return false; }
2763 }
2764
2765 int os::Linux::get_existing_num_nodes() {
2766 int node;
2767 int highest_node_number = Linux::numa_max_node();
2768 int num_nodes = 0;
2769
2770 // Get the total number of nodes in the system including nodes without memory.
2771 for (node = 0; node <= highest_node_number; node++) {
2772 if (isnode_in_existing_nodes(node)) {
2773 num_nodes++;
2774 }
2775 }
2776 return num_nodes;
2777 }
2778
2779 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2780 int highest_node_number = Linux::numa_max_node();
2781 size_t i = 0;
2782
2783 // Map all node ids on which it is possible to allocate memory. Note that node
2784 // ids are not always consecutive: there may be gaps between 0 and the highest
2785 // node number. If the nodes have been bound explicitly using numactl membind,
2786 // then allocate memory from those nodes only.
2787 for (int node = 0; node <= highest_node_number; node++) {
2788 if (Linux::isnode_in_bound_nodes((unsigned int)node)) {
2789 ids[i++] = node;
2790 }
2791 }
2792 return i;
2793 }
2794
2795 bool os::get_page_info(char *start, page_info* info) {
2796 return false;
2797 }
2798
2799 char *os::scan_pages(char *start, char* end, page_info* page_expected,
2800 page_info* page_found) {
2801 return end;
2802 }
2803
2804
2805 int os::Linux::sched_getcpu_syscall(void) {
2806 unsigned int cpu = 0;
2807 int retval = -1;
2808
2809 #if defined(IA32)
2810 #ifndef SYS_getcpu
2811 #define SYS_getcpu 318
2871 set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
2872 libnuma_dlsym(handle, "numa_max_node")));
2873 set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t,
2874 libnuma_dlsym(handle, "numa_num_configured_nodes")));
2875 set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
2876 libnuma_dlsym(handle, "numa_available")));
2877 set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
2878 libnuma_dlsym(handle, "numa_tonode_memory")));
2879 set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
2880 libnuma_dlsym(handle, "numa_interleave_memory")));
2881 set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t,
2882 libnuma_v2_dlsym(handle, "numa_interleave_memory")));
2883 set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
2884 libnuma_dlsym(handle, "numa_set_bind_policy")));
2885 set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
2886 libnuma_dlsym(handle, "numa_bitmask_isbitset")));
2887 set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
2888 libnuma_dlsym(handle, "numa_distance")));
2889 set_numa_get_membind(CAST_TO_FN_PTR(numa_get_membind_func_t,
2890 libnuma_v2_dlsym(handle, "numa_get_membind")));
2891
2892 if (numa_available() != -1) {
2893 set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
2894 set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
2895 set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
2896 // Create an index -> node mapping, since nodes are not always consecutive
2897 _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
2898 rebuild_nindex_to_node_map();
2899 // Create a cpu -> node mapping
2900 _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
2901 rebuild_cpu_to_node_map();
2902 return true;
2903 }
2904 }
2905 }
2906 return false;
2907 }
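// The block above keeps libnuma optional: HotSpot never links against it at
// build time; it dlopen()s the library at runtime and resolves each entry
// point by name, falling back to plain behavior when the library or a symbol
// is missing. A minimal sketch of the pattern (illustrative; error handling
// trimmed):
//
//   #include <dlfcn.h>
//
//   typedef int (*numa_max_node_func_t)(void);
//
//   static numa_max_node_func_t resolve_numa_max_node() {
//     void* handle = dlopen("libnuma.so.1", RTLD_LAZY);
//     if (handle == NULL) return NULL;  // libnuma is not installed
//     return (numa_max_node_func_t) dlsym(handle, "numa_max_node");
//   }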
2908
2909 size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
2910 // Creating a guard page is very expensive. A Java thread already has HotSpot
2911 // guard pages, so only enable the glibc guard page for non-Java threads.
2912 // (Remember: a compiler thread is a Java thread, too!)
2913 return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : page_size());
2914 }
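// The returned size is intended for pthread_attr_setguardsize() at thread
// creation time. A sketch of applying it (illustrative only):
//
//   #include <pthread.h>
//
//   static void apply_guard_size(pthread_attr_t* attr, size_t guard_bytes) {
//     // A size of 0 disables the glibc guard page; Java and compiler
//     // threads already get VM-managed HotSpot guard pages instead.
//     pthread_attr_setguardsize(attr, guard_bytes);
//   }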
2915
3002 if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
3003 return cpu_to_node()->at(cpu_id);
3004 }
3005 return -1;
3006 }
3007
3008 GrowableArray<int>* os::Linux::_cpu_to_node;
3009 GrowableArray<int>* os::Linux::_nindex_to_node;
3010 os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
3011 os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
3012 os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
3013 os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;
3014 os::Linux::numa_available_func_t os::Linux::_numa_available;
3015 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
3016 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
3017 os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
3018 os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
3019 os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
3020 os::Linux::numa_distance_func_t os::Linux::_numa_distance;
3021 os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;
3022 unsigned long* os::Linux::_numa_all_nodes;
3023 struct bitmask* os::Linux::_numa_all_nodes_ptr;
3024 struct bitmask* os::Linux::_numa_nodes_ptr;
3025
3026 bool os::pd_uncommit_memory(char* addr, size_t size) {
3027 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
3028 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
3029 return res != (uintptr_t) MAP_FAILED;
3030 }
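// Uncommitting works by stamping a fresh PROT_NONE, MAP_NORESERVE mapping
// over the committed range: MAP_FIXED atomically replaces the old pages and
// releases their backing store while keeping the address range reserved.
// A standalone sketch of the reserve/commit/uncommit cycle built on this
// technique (illustrative only; error handling reduced to return values):
//
//   #include <sys/mman.h>
//
//   char* reserve(size_t sz) {  // address space only, no storage charged
//     void* p = ::mmap(NULL, sz, PROT_NONE,
//                      MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS, -1, 0);
//     return p == MAP_FAILED ? NULL : (char*) p;
//   }
//
//   bool commit(char* p, size_t sz) {  // make the pages readable/writable
//     return ::mmap(p, sz, PROT_READ | PROT_WRITE,
//                   MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0) != MAP_FAILED;
//   }
//
//   bool uncommit(char* p, size_t sz) {  // discard pages, keep reservation
//     return ::mmap(p, sz, PROT_NONE,
//                   MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANONYMOUS,
//                   -1, 0) != MAP_FAILED;
//   }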
3031
3032 static address get_stack_commited_bottom(address bottom, size_t size) {
3033 address nbot = bottom;
3034 address ntop = bottom + size;
3035
3036 size_t page_sz = os::vm_page_size();
3037 unsigned pages = size / page_sz;
3038
3039 unsigned char vec[1];
3040 unsigned imin = 1, imax = pages + 1, imid;
3041 int mincore_return_value = 0;
3042
3043 assert(imin <= imax, "Unexpected page size");
3044
4988 }
4989
4990 suppress_primordial_thread_resolution = Arguments::created_by_java_launcher();
4991 if (!suppress_primordial_thread_resolution) {
4992 Linux::capture_initial_stack(JavaThread::stack_size_at_create());
4993 }
4994
4995 #if defined(IA32)
4996 workaround_expand_exec_shield_cs_limit();
4997 #endif
4998
4999 Linux::libpthread_init();
5000 Linux::sched_getcpu_init();
5001 log_info(os)("HotSpot is running with %s, %s",
5002 Linux::glibc_version(), Linux::libpthread_version());
5003
5004 if (UseNUMA) {
5005 if (!Linux::libnuma_init()) {
5006 UseNUMA = false;
5007 } else {
5008 if ((Linux::numa_max_node() < 1) || Linux::isbound_to_single_node()) {
5009 // If there's only one node (they start from 0) or if the process
5010 // is bound explicitly to a single node using membind, disable NUMA.
5011 UseNUMA = false;
5012 }
5013 }
5014
5015 if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
5016 // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
5017 // we can make the adaptive lgrp chunk resizing work. If the user specified both
5018 // UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn
5019 // and disable adaptive resizing.
5020 if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) {
5021 warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, "
5022 "disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)");
5023 UseAdaptiveSizePolicy = false;
5024 UseAdaptiveNUMAChunkSizing = false;
5025 }
5026 }
5027
5028 if (!UseNUMA && ForceNUMA) {
5029 UseNUMA = true;
5030 }
5031 }
5032
5033 if (MaxFDLimit) {
|
2707 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2708 if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
2709 // We don't check the return value: madvise(MADV_HUGEPAGE) may not
2710 // be supported or the memory may already be backed by huge pages.
2711 ::madvise(addr, bytes, MADV_HUGEPAGE);
2712 }
2713 }
2714
2715 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2716 // This method works by doing an mmap over an existing mapping and effectively discarding
2717 // the existing pages. However, it won't work for SHM-based large pages, which cannot be
2718 // uncommitted at all. We don't do anything in that case, to avoid creating a segment with
2719 // small pages on top of the SHM segment. This method always works for small pages, so we
2720 // allow it in any case.
2721 if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
2722 commit_memory(addr, bytes, alignment_hint, !ExecMem);
2723 }
2724 }
2725
2726 void os::numa_make_global(char *addr, size_t bytes) {
2727 if (!UseNUMAInterleaving) {
2728 return;
2729 }
2730 Linux::numa_interleave_memory(addr, bytes);
2731 }
2732
2733 // Argument for numa_set_bind_policy(int): passing 0 sets the bind
2734 // policy to MPOL_PREFERRED for the current thread.
2735 #define USE_MPOL_PREFERRED 0
2736
2737 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2738 // To make NUMA and large pages more robust when both are enabled, we need to ease
2739 // the requirements on where the memory should be allocated. MPOL_BIND is the
2740 // default policy and it will force memory to be allocated on the specified
2741 // node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on
2742 // the specified node, but will not force it. Using this policy will prevent
2743 // getting SIGBUS when trying to allocate large pages on NUMA nodes with no
2744 // free large pages.
2745 Linux::numa_set_bind_policy(USE_MPOL_PREFERRED);
2746 Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
2747 }
2748
2749 bool os::numa_topology_changed() { return false; }
2766 }
2767
2768 int os::Linux::get_existing_num_nodes() {
2769 int node;
2770 int highest_node_number = Linux::numa_max_node();
2771 int num_nodes = 0;
2772
2773 // Get the total number of nodes in the system including nodes without memory.
2774 for (node = 0; node <= highest_node_number; node++) {
2775 if (isnode_in_existing_nodes(node)) {
2776 num_nodes++;
2777 }
2778 }
2779 return num_nodes;
2780 }
2781
2782 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2783 int highest_node_number = Linux::numa_max_node();
2784 size_t i = 0;
2785
2786 // If the process was started in interleave mode, report the nodes from the saved interleave bitmask.
2787 if (Linux::_numa_interleave_ptr != NULL) {
2788 for (int node = 0; node <= highest_node_number; node++) {
2789 if (Linux::_numa_bitmask_isbitset(Linux::_numa_interleave_ptr, node)) {
2790 ids[i++] = node;
2791 }
2792 }
2793 } else {
2794 // Map all node ids on which it is possible to allocate memory. Note that node
2795 // ids are not always consecutive: there may be gaps between 0 and the highest
2796 // node number. If the nodes have been bound explicitly using numactl membind,
2797 // then allocate memory from those nodes only.
2798 for (int node = 0; node <= highest_node_number; node++) {
2799 if (Linux::isnode_in_bound_nodes((unsigned int)node)) {
2800 ids[i++] = node;
2801 }
2802 }
2803 }
2804 return i;
2805 }
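// A direct-libnuma sketch of the same mode-sensitive node enumeration
// (illustrative only; assumes -lnuma with the v2 bitmask API):
//
//   #include <numa.h>
//
//   size_t collect_alloc_nodes(int* ids) {
//     // Prefer the interleave mask when the process was started in
//     // interleave mode (numactl --interleave); otherwise use the
//     // membind mask, which covers all nodes in the default case.
//     struct bitmask* mask = numa_get_interleave_mask();
//     if (numa_bitmask_weight(mask) == 0) mask = numa_get_membind();
//     size_t n = 0;
//     for (int node = 0; node <= numa_max_node(); node++) {
//       if (numa_bitmask_isbitset(mask, node)) ids[n++] = node;
//     }
//     return n;
//   }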
2806
2807 bool os::get_page_info(char *start, page_info* info) {
2808 return false;
2809 }
2810
2811 char *os::scan_pages(char *start, char* end, page_info* page_expected,
2812 page_info* page_found) {
2813 return end;
2814 }
2815
2816
2817 int os::Linux::sched_getcpu_syscall(void) {
2818 unsigned int cpu = 0;
2819 int retval = -1;
2820
2821 #if defined(IA32)
2822 #ifndef SYS_getcpu
2823 #define SYS_getcpu 318
2883 set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
2884 libnuma_dlsym(handle, "numa_max_node")));
2885 set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t,
2886 libnuma_dlsym(handle, "numa_num_configured_nodes")));
2887 set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
2888 libnuma_dlsym(handle, "numa_available")));
2889 set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
2890 libnuma_dlsym(handle, "numa_tonode_memory")));
2891 set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
2892 libnuma_dlsym(handle, "numa_interleave_memory")));
2893 set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t,
2894 libnuma_v2_dlsym(handle, "numa_interleave_memory")));
2895 set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
2896 libnuma_dlsym(handle, "numa_set_bind_policy")));
2897 set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
2898 libnuma_dlsym(handle, "numa_bitmask_isbitset")));
2899 set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
2900 libnuma_dlsym(handle, "numa_distance")));
2901 set_numa_get_membind(CAST_TO_FN_PTR(numa_get_membind_func_t,
2902 libnuma_v2_dlsym(handle, "numa_get_membind")));
2903 set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t,
2904 libnuma_v2_dlsym(handle, "numa_get_interleave_mask")));
2905
2906 if (numa_available() != -1) {
2907 set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
2908 set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
2909 set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
2910
2911 set_numa_interleave_ptr(_numa_get_interleave_mask());
2912 set_numa_membind_ptr(_numa_get_membind());
2913
2914 // Create an index -> node mapping, since nodes are not always consecutive
2915 _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
2916 rebuild_nindex_to_node_map();
2917 // Create a cpu -> node mapping
2918 _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
2919 rebuild_cpu_to_node_map();
2920 return true;
2921 }
2922 }
2923 }
2924 return false;
2925 }
2926
2927 size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
2928 // Creating a guard page is very expensive. A Java thread already has HotSpot
2929 // guard pages, so only enable the glibc guard page for non-Java threads.
2930 // (Remember: a compiler thread is a Java thread, too!)
2931 return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : page_size());
2932 }
2933
3020 if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
3021 return cpu_to_node()->at(cpu_id);
3022 }
3023 return -1;
3024 }
3025
3026 GrowableArray<int>* os::Linux::_cpu_to_node;
3027 GrowableArray<int>* os::Linux::_nindex_to_node;
3028 os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
3029 os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
3030 os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
3031 os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;
3032 os::Linux::numa_available_func_t os::Linux::_numa_available;
3033 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
3034 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
3035 os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
3036 os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
3037 os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
3038 os::Linux::numa_distance_func_t os::Linux::_numa_distance;
3039 os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;
3040 os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask;
3041 unsigned long* os::Linux::_numa_all_nodes;
3042 struct bitmask* os::Linux::_numa_all_nodes_ptr;
3043 struct bitmask* os::Linux::_numa_nodes_ptr;
3044 struct bitmask* os::Linux::_numa_interleave_ptr;
3045 struct bitmask* os::Linux::_numa_membind_ptr;
3046
3047 bool os::pd_uncommit_memory(char* addr, size_t size) {
3048 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
3049 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
3050 return res != (uintptr_t) MAP_FAILED;
3051 }
3052
3053 static address get_stack_commited_bottom(address bottom, size_t size) {
3054 address nbot = bottom;
3055 address ntop = bottom + size;
3056
3057 size_t page_sz = os::vm_page_size();
3058 unsigned pages = size / page_sz;
3059
3060 unsigned char vec[1];
3061 unsigned imin = 1, imax = pages + 1, imid;
3062 int mincore_return_value = 0;
3063
3064 assert(imin <= imax, "Unexpected page size");
3065
5009 }
5010
5011 suppress_primordial_thread_resolution = Arguments::created_by_java_launcher();
5012 if (!suppress_primordial_thread_resolution) {
5013 Linux::capture_initial_stack(JavaThread::stack_size_at_create());
5014 }
5015
5016 #if defined(IA32)
5017 workaround_expand_exec_shield_cs_limit();
5018 #endif
5019
5020 Linux::libpthread_init();
5021 Linux::sched_getcpu_init();
5022 log_info(os)("HotSpot is running with %s, %s",
5023 Linux::glibc_version(), Linux::libpthread_version());
5024
5025 if (UseNUMA) {
5026 if (!Linux::libnuma_init()) {
5027 UseNUMA = false;
5028 } else {
5029
5030 // Identify whether running in membind or interleave mode.
5031 bool is_membind = false;
5032 bool is_interleaved = false;
5033
5034 log_info(os)("UseNUMA is enabled");
5035
5036 // Check for membind mode.
5037 for (int node = 0; node <= Linux::numa_max_node(); node++) {
5038 if (Linux::_numa_bitmask_isbitset(Linux::_numa_membind_ptr, node)) {
5039 is_membind = true;
5040 break;
5041 }
5042 }
5043
5044 // Check for interleave mode.
5045 for (int node = 0; node <= Linux::numa_max_node(); node++) {
5046 if (Linux::_numa_bitmask_isbitset(Linux::_numa_interleave_ptr, node)) {
5047 is_interleaved = true;
5048 // Interleave mode takes precedence over membind, so clear the membind flag.
5049 is_membind = false;
5050 break;
5051 }
5052 }
5053
5054 struct bitmask* bmp = NULL;  // remains NULL if neither mode was detected
5055
5056 if (is_interleaved) {
5057 bmp = Linux::_numa_interleave_ptr;
5058 Linux::set_numa_membind_ptr(NULL);
5059 log_info(os)("Java is configured to run in interleave mode");
5060 } else if (is_membind) {
5061 bmp = Linux::_numa_membind_ptr;
5062 Linux::set_numa_interleave_ptr(NULL);
5063 log_info(os)("Java is configured to run in membind mode");
5064 }
5065
5066 // Log the node set only when a mode was identified; bmp may be NULL otherwise.
5067 if (bmp != NULL) {
5068 char buf[BUFSIZ] = {'\0'};
5069 char* bufptr = buf;
5070 for (int node = 0; node <= Linux::numa_max_node(); node++) {
5071 if (Linux::_numa_bitmask_isbitset(bmp, node)) bufptr += sprintf(bufptr, "%d ", node);
5072 }
5073 if (bufptr != buf) bufptr[-1] = '\0';  // strip the trailing space
5074 log_info(os)("Heap will be configured using NUMA memory nodes: %s", buf);
5075 }
5076 }
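// How the two modes arise in practice (illustrative command lines):
//
//   numactl --interleave=0,1 java ...  => interleave mask has nodes 0 and 1 set
//   numactl --membind=0 java ...       => membind mask restricted to node 0
//   java ...                           => default: membind mask covers all nodes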
5077
5078
5079 if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
5080 // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
5081 // we can make the adaptive lgrp chunk resizing work. If the user specified both
5082 // UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn
5083 // and disable adaptive resizing.
5084 if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) {
5085 warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, "
5086 "disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)");
5087 UseAdaptiveSizePolicy = false;
5088 UseAdaptiveNUMAChunkSizing = false;
5089 }
5090 }
5091
5092 if (!UseNUMA && ForceNUMA) {
5093 UseNUMA = true;
5094 }
5095 }
5096
5097 if (MaxFDLimit) {