2816 retval = syscall(SYS_getcpu, &cpu, NULL, NULL);
2817 #elif defined(AMD64)
2818 // Unfortunately we have to bring all these macros here from vsyscall.h
2819 // to be able to compile on old linuxes.
2820 #define __NR_vgetcpu 2
2821 #define VSYSCALL_START (-10UL << 20)
2822 #define VSYSCALL_SIZE 1024
2823 #define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
2824 typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);
2825 vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
2826 retval = vgetcpu(&cpu, NULL, NULL);
2827 #endif
2828
2829 return (retval == -1) ? retval : cpu;
2830 }
2831
2832 // Something to do with the numa-aware allocator needs these symbols
2833 extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
2834 extern "C" JNIEXPORT void numa_error(char *where) { }
2835
2836
2837 // If we are running with libnuma version > 2, then we should
2838 // be trying to use symbols with versions 1.1
2839 // If we are running with earlier version, which did not have symbol versions,
2840 // we should use the base version.
2841 void* os::Linux::libnuma_dlsym(void* handle, const char *name) {
2842 void *f = dlvsym(handle, name, "libnuma_1.1");
2843 if (f == NULL) {
2844 f = dlsym(handle, name);
2845 }
2846 return f;
2847 }
2848
2849 bool os::Linux::libnuma_init() {
2850 // sched_getcpu() should be in libc.
2851 set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
2852 dlsym(RTLD_DEFAULT, "sched_getcpu")));
2853
2854 // If it's not, try a direct syscall.
2855 if (sched_getcpu() == -1) {
2856 set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
2857 (void*)&sched_getcpu_syscall));
2858 }
2859
2860 if (sched_getcpu() != -1) { // Does it work?
2861 void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
2862 if (handle != NULL) {
2863 set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
2864 libnuma_dlsym(handle, "numa_node_to_cpus")));
2865 set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
2866 libnuma_dlsym(handle, "numa_max_node")));
2867 set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t,
2868 libnuma_dlsym(handle, "numa_num_configured_nodes")));
2869 set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
2870 libnuma_dlsym(handle, "numa_available")));
2871 set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
2872 libnuma_dlsym(handle, "numa_tonode_memory")));
2873 set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
2874 libnuma_dlsym(handle, "numa_interleave_memory")));
2875 set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
2876 libnuma_dlsym(handle, "numa_set_bind_policy")));
2877 set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
2878 libnuma_dlsym(handle, "numa_bitmask_isbitset")));
2879 set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
2880 libnuma_dlsym(handle, "numa_distance")));
2881
2882 if (numa_available() != -1) {
2883 set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
2884 set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
2885 set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
2886 // Create an index -> node mapping, since nodes are not always consecutive
2887 _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
2888 rebuild_nindex_to_node_map();
2889 // Create a cpu -> node mapping
2890 _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
2891 rebuild_cpu_to_node_map();
2892 return true;
2893 }
2894 }
2981 }
2982 FREE_C_HEAP_ARRAY(unsigned long, cpu_map);
2983 }
2984
2985 int os::Linux::get_node_by_cpu(int cpu_id) {
2986 if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
2987 return cpu_to_node()->at(cpu_id);
2988 }
2989 return -1;
2990 }
2991
// Definitions of the static fields declared in os::Linux. The libnuma
// function pointers are resolved at runtime by libnuma_init() (via
// dlsym/dlvsym) and stay at their zero-initialized values when libnuma
// or sched_getcpu is unavailable.
GrowableArray<int>* os::Linux::_cpu_to_node;
GrowableArray<int>* os::Linux::_nindex_to_node;
os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;
os::Linux::numa_available_func_t os::Linux::_numa_available;
os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
os::Linux::numa_distance_func_t os::Linux::_numa_distance;
unsigned long* os::Linux::_numa_all_nodes;
struct bitmask* os::Linux::_numa_all_nodes_ptr;
struct bitmask* os::Linux::_numa_nodes_ptr;
3007
3008 bool os::pd_uncommit_memory(char* addr, size_t size) {
3009 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
3010 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
3011 return res != (uintptr_t) MAP_FAILED;
3012 }
3013
3014 static address get_stack_commited_bottom(address bottom, size_t size) {
3015 address nbot = bottom;
3016 address ntop = bottom + size;
3017
3018 size_t page_sz = os::vm_page_size();
3019 unsigned pages = size / page_sz;
3020
|
2816 retval = syscall(SYS_getcpu, &cpu, NULL, NULL);
2817 #elif defined(AMD64)
2818 // Unfortunately we have to bring all these macros here from vsyscall.h
2819 // to be able to compile on old linuxes.
2820 #define __NR_vgetcpu 2
2821 #define VSYSCALL_START (-10UL << 20)
2822 #define VSYSCALL_SIZE 1024
2823 #define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
2824 typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);
2825 vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
2826 retval = vgetcpu(&cpu, NULL, NULL);
2827 #endif
2828
2829 return (retval == -1) ? retval : cpu;
2830 }
2831
2832 // Something to do with the numa-aware allocator needs these symbols
2833 extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
2834 extern "C" JNIEXPORT void numa_error(char *where) { }
2835
2836 // Handle request to load libnuma symbol version 1.1 (API v1). If it fails
2837 // load symbol from base version instead.
2838 void* os::Linux::libnuma_dlsym(void* handle, const char *name) {
2839 void *f = dlvsym(handle, name, "libnuma_1.1");
2840 if (f == NULL) {
2841 f = dlsym(handle, name);
2842 }
2843 return f;
2844 }
2845
2846 // Handle request to load libnuma symbol version 1.2 (API v2) only.
2847 // Return NULL if the symbol is not defined in this particular version.
2848 void* os::Linux::libnuma_v2_dlsym(void* handle, const char* name) {
2849 return dlvsym(handle, name, "libnuma_1.2");
2850 }
2851
2852
2853 bool os::Linux::libnuma_init() {
2854 // sched_getcpu() should be in libc.
2855 set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
2856 dlsym(RTLD_DEFAULT, "sched_getcpu")));
2857
2858 // If it's not, try a direct syscall.
2859 if (sched_getcpu() == -1) {
2860 set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
2861 (void*)&sched_getcpu_syscall));
2862 }
2863
2864 if (sched_getcpu() != -1) { // Does it work?
2865 void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
2866 if (handle != NULL) {
2867 set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
2868 libnuma_dlsym(handle, "numa_node_to_cpus")));
2869 set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
2870 libnuma_dlsym(handle, "numa_max_node")));
2871 set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t,
2872 libnuma_dlsym(handle, "numa_num_configured_nodes")));
2873 set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
2874 libnuma_dlsym(handle, "numa_available")));
2875 set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
2876 libnuma_dlsym(handle, "numa_tonode_memory")));
2877 set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
2878 libnuma_dlsym(handle, "numa_interleave_memory")));
2879 set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t,
2880 libnuma_v2_dlsym(handle, "numa_interleave_memory")));
2881 set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
2882 libnuma_dlsym(handle, "numa_set_bind_policy")));
2883 set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
2884 libnuma_dlsym(handle, "numa_bitmask_isbitset")));
2885 set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
2886 libnuma_dlsym(handle, "numa_distance")));
2887
2888 if (numa_available() != -1) {
2889 set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
2890 set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
2891 set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
2892 // Create an index -> node mapping, since nodes are not always consecutive
2893 _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
2894 rebuild_nindex_to_node_map();
2895 // Create a cpu -> node mapping
2896 _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
2897 rebuild_cpu_to_node_map();
2898 return true;
2899 }
2900 }
2987 }
2988 FREE_C_HEAP_ARRAY(unsigned long, cpu_map);
2989 }
2990
2991 int os::Linux::get_node_by_cpu(int cpu_id) {
2992 if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
2993 return cpu_to_node()->at(cpu_id);
2994 }
2995 return -1;
2996 }
2997
// Definitions of the static fields declared in os::Linux. The libnuma
// function pointers are resolved at runtime by libnuma_init() (via
// dlsym/dlvsym) and stay at their zero-initialized values when libnuma
// or sched_getcpu is unavailable.
GrowableArray<int>* os::Linux::_cpu_to_node;
GrowableArray<int>* os::Linux::_nindex_to_node;
os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;
os::Linux::numa_available_func_t os::Linux::_numa_available;
os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
// v2-versioned variant; NULL when libnuma lacks the 1.2 symbol (see libnuma_v2_dlsym).
os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
os::Linux::numa_distance_func_t os::Linux::_numa_distance;
unsigned long* os::Linux::_numa_all_nodes;
struct bitmask* os::Linux::_numa_all_nodes_ptr;
struct bitmask* os::Linux::_numa_nodes_ptr;
3014
3015 bool os::pd_uncommit_memory(char* addr, size_t size) {
3016 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
3017 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
3018 return res != (uintptr_t) MAP_FAILED;
3019 }
3020
3021 static address get_stack_commited_bottom(address bottom, size_t size) {
3022 address nbot = bottom;
3023 address ntop = bottom + size;
3024
3025 size_t page_sz = os::vm_page_size();
3026 unsigned pages = size / page_sz;
3027
|