2802 #elif defined(AMD64)
2803 // Unfortunately we have to bring all these macros here from vsyscall.h
2804 // to be able to compile on old linuxes.
2805 # define __NR_vgetcpu 2
2806 # define VSYSCALL_START (-10UL << 20)
2807 # define VSYSCALL_SIZE 1024
2808 # define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
2809 typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);
2810 vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
2811 retval = vgetcpu(&cpu, NULL, NULL);
2812 #endif
2813
2814 return (retval == -1) ? retval : cpu;
2815 }
2816
// Something to do with the numa-aware allocator needs these symbols.
// Empty stubs: libnuma calls numa_warn()/numa_error() to report problems,
// and the VM deliberately ignores such reports.  NOTE(review): presumably
// exported (JNIEXPORT) so a dynamically loaded libnuma can resolve them
// against the VM — confirm against the loader setup.
extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
extern "C" JNIEXPORT void numa_error(char *where) { }
// fork1() simply forwards to fork(); it appears to exist for source
// compatibility with platforms that distinguish the two calls.
extern "C" JNIEXPORT int fork1() { return fork(); }
2821
2822
2823 // If we are running with libnuma version > 2, then we should
2824 // be trying to use symbols with versions 1.1
2825 // If we are running with earlier version, which did not have symbol versions,
2826 // we should use the base version.
2827 void* os::Linux::libnuma_dlsym(void* handle, const char *name) {
2828 void *f = dlvsym(handle, name, "libnuma_1.1");
2829 if (f == NULL) {
2830 f = dlsym(handle, name);
2831 }
2832 return f;
2833 }
2834
2835 bool os::Linux::libnuma_init() {
2836 // sched_getcpu() should be in libc.
2837 set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
2838 dlsym(RTLD_DEFAULT, "sched_getcpu")));
2839
2840 // If it's not, try a direct syscall.
2841 if (sched_getcpu() == -1)
2842 set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, (void*)&sched_getcpu_syscall));
2843
2844 if (sched_getcpu() != -1) { // Does it work?
2845 void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
2846 if (handle != NULL) {
2847 set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
2848 libnuma_dlsym(handle, "numa_node_to_cpus")));
2849 set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
2850 libnuma_dlsym(handle, "numa_max_node")));
2851 set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t,
2852 libnuma_dlsym(handle, "numa_num_configured_nodes")));
2853 set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
2854 libnuma_dlsym(handle, "numa_available")));
2855 set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
2856 libnuma_dlsym(handle, "numa_tonode_memory")));
2857 set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
2858 libnuma_dlsym(handle, "numa_interleave_memory")));
2859 set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
2860 libnuma_dlsym(handle, "numa_set_bind_policy")));
2861 set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
2862 libnuma_dlsym(handle, "numa_bitmask_isbitset")));
2863 set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
2864 libnuma_dlsym(handle, "numa_distance")));
2865
2866 if (numa_available() != -1) {
2867 set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
2868 set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
2869 set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
2870 // Create an index -> node mapping, since nodes are not always consecutive
2871 _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
2872 rebuild_nindex_to_node_map();
2873 // Create a cpu -> node mapping
2874 _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
2875 rebuild_cpu_to_node_map();
2876 return true;
2877 }
2878 }
2958 }
2959 FREE_C_HEAP_ARRAY(unsigned long, cpu_map, mtInternal);
2960 }
2961
2962 int os::Linux::get_node_by_cpu(int cpu_id) {
2963 if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
2964 return cpu_to_node()->at(cpu_id);
2965 }
2966 return -1;
2967 }
2968
// Storage for the lazily resolved libnuma entry points and the CPU/node
// topology tables.  All of these are populated by libnuma_init() (and the
// rebuild_* helpers it calls); they remain NULL/zero if libnuma is not
// available or fails to initialize.
GrowableArray<int>* os::Linux::_cpu_to_node;     // cpu id -> node id
GrowableArray<int>* os::Linux::_nindex_to_node;  // dense index -> node id
os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;
os::Linux::numa_available_func_t os::Linux::_numa_available;
os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
os::Linux::numa_distance_func_t os::Linux::_numa_distance;
// Node-set data resolved directly from libnuma's exported globals.
unsigned long* os::Linux::_numa_all_nodes;
struct bitmask* os::Linux::_numa_all_nodes_ptr;
struct bitmask* os::Linux::_numa_nodes_ptr;
2984
// Release the physical backing for [addr, addr+size) while keeping the
// address range reserved: overlay it with a PROT_NONE, MAP_NORESERVE
// anonymous mapping.  MAP_FIXED atomically replaces the old pages, so
// their contents are discarded and no swap is reserved for the range.
bool os::pd_uncommit_memory(char* addr, size_t size) {
  uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
                                MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
  return res != (uintptr_t) MAP_FAILED;
}
2990
2991 static
2992 address get_stack_commited_bottom(address bottom, size_t size) {
2993 address nbot = bottom;
2994 address ntop = bottom + size;
2995
2996 size_t page_sz = os::vm_page_size();
2997 unsigned pages = size / page_sz;
|
2802 #elif defined(AMD64)
2803 // Unfortunately we have to bring all these macros here from vsyscall.h
2804 // to be able to compile on old linuxes.
2805 # define __NR_vgetcpu 2
2806 # define VSYSCALL_START (-10UL << 20)
2807 # define VSYSCALL_SIZE 1024
2808 # define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
2809 typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);
2810 vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
2811 retval = vgetcpu(&cpu, NULL, NULL);
2812 #endif
2813
2814 return (retval == -1) ? retval : cpu;
2815 }
2816
// Something to do with the numa-aware allocator needs these symbols.
// Empty stubs: libnuma calls numa_warn()/numa_error() to report problems,
// and the VM deliberately ignores such reports.  NOTE(review): presumably
// exported (JNIEXPORT) so a dynamically loaded libnuma can resolve them
// against the VM — confirm against the loader setup.
extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
extern "C" JNIEXPORT void numa_error(char *where) { }
// fork1() simply forwards to fork(); it appears to exist for source
// compatibility with platforms that distinguish the two calls.
extern "C" JNIEXPORT int fork1() { return fork(); }
2821
// Handle request to load libnuma symbol version 1.1 (API v1). If it fails
// load symbol from base version instead.
void* os::Linux::libnuma_dlsym(void* handle, const char *name) {
  void *f = dlvsym(handle, name, "libnuma_1.1");
  // dlvsym() returns NULL when the versioned symbol is absent (e.g. a
  // pre-symbol-versioning libnuma); fall back to the unversioned lookup.
  if (f == NULL) {
    f = dlsym(handle, name);
  }
  return f;
}
2831
// Handle request to load libnuma symbol version 1.2 (API v2) only.
// Return NULL if the symbol is not defined in this particular version.
void* os::Linux::libnuma_v2_dlsym(void* handle, const char* name) {
  // No unversioned fallback here, unlike libnuma_dlsym(): only an exact
  // "libnuma_1.2" versioned match is accepted.  NOTE(review): presumably
  // because the v2 API changed signatures, so a base-version symbol would
  // be call-incompatible — confirm against the libnuma headers.
  return dlvsym(handle, name, "libnuma_1.2");
}
2837
2838 bool os::Linux::libnuma_init() {
2839 // sched_getcpu() should be in libc.
2840 set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
2841 dlsym(RTLD_DEFAULT, "sched_getcpu")));
2842
2843 // If it's not, try a direct syscall.
2844 if (sched_getcpu() == -1)
2845 set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, (void*)&sched_getcpu_syscall));
2846
2847 if (sched_getcpu() != -1) { // Does it work?
2848 void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
2849 if (handle != NULL) {
2850 set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
2851 libnuma_dlsym(handle, "numa_node_to_cpus")));
2852 set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
2853 libnuma_dlsym(handle, "numa_max_node")));
2854 set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t,
2855 libnuma_dlsym(handle, "numa_num_configured_nodes")));
2856 set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
2857 libnuma_dlsym(handle, "numa_available")));
2858 set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
2859 libnuma_dlsym(handle, "numa_tonode_memory")));
2860 set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
2861 libnuma_dlsym(handle, "numa_interleave_memory")));
2862 set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t,
2863 libnuma_v2_dlsym(handle, "numa_interleave_memory")));
2864 set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
2865 libnuma_dlsym(handle, "numa_set_bind_policy")));
2866 set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
2867 libnuma_dlsym(handle, "numa_bitmask_isbitset")));
2868 set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
2869 libnuma_dlsym(handle, "numa_distance")));
2870
2871 if (numa_available() != -1) {
2872 set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
2873 set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
2874 set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
2875 // Create an index -> node mapping, since nodes are not always consecutive
2876 _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
2877 rebuild_nindex_to_node_map();
2878 // Create a cpu -> node mapping
2879 _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
2880 rebuild_cpu_to_node_map();
2881 return true;
2882 }
2883 }
2963 }
2964 FREE_C_HEAP_ARRAY(unsigned long, cpu_map, mtInternal);
2965 }
2966
2967 int os::Linux::get_node_by_cpu(int cpu_id) {
2968 if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
2969 return cpu_to_node()->at(cpu_id);
2970 }
2971 return -1;
2972 }
2973
// Storage for the lazily resolved libnuma entry points and the CPU/node
// topology tables.  All of these are populated by libnuma_init() (and the
// rebuild_* helpers it calls); they remain NULL/zero if libnuma is not
// available or fails to initialize.
GrowableArray<int>* os::Linux::_cpu_to_node;     // cpu id -> node id
GrowableArray<int>* os::Linux::_nindex_to_node;  // dense index -> node id
os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;
os::Linux::numa_available_func_t os::Linux::_numa_available;
os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
// v2 (libnuma_1.2) variant; NULL when only the v1 API is present.
os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
os::Linux::numa_distance_func_t os::Linux::_numa_distance;
// Node-set data resolved directly from libnuma's exported globals.
unsigned long* os::Linux::_numa_all_nodes;
struct bitmask* os::Linux::_numa_all_nodes_ptr;
struct bitmask* os::Linux::_numa_nodes_ptr;
2990
2991 bool os::pd_uncommit_memory(char* addr, size_t size) {
2992 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
2993 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
2994 return res != (uintptr_t) MAP_FAILED;
2995 }
2996
2997 static
2998 address get_stack_commited_bottom(address bottom, size_t size) {
2999 address nbot = bottom;
3000 address ntop = bottom + size;
3001
3002 size_t page_sz = os::vm_page_size();
3003 unsigned pages = size / page_sz;
|