// Linux provides no notification mechanism for NUMA topology changes,
// so report a static topology to the VM.
bool os::numa_topology_changed() { return false; }
2992
// Number of locality groups (NUMA nodes) the VM can allocate on.
size_t os::numa_get_groups_num() {
  // Return just the number of nodes in which it's possible to allocate memory
  // (in numa terminology, configured nodes).
  return Linux::numa_num_configured_nodes();
}
2998
2999 int os::numa_get_group_id() {
3000 int cpu_id = Linux::sched_getcpu();
3001 if (cpu_id != -1) {
3002 int lgrp_id = Linux::get_node_by_cpu(cpu_id);
3003 if (lgrp_id != -1) {
3004 return lgrp_id;
3005 }
3006 }
3007 return 0;
3008 }
3009
// NUMA node backing the page that contains 'address', or -1 on failure.
int os::numa_get_group_id_for_address(const void* address) {
#ifndef MPOL_F_NODE
#define MPOL_F_NODE (1<<0) // Return next IL mode instead of node mask
#endif

#ifndef MPOL_F_ADDR
#define MPOL_F_ADDR (1<<1) // Look up VMA using address
#endif

  int id = 0;

  // With MPOL_F_NODE | MPOL_F_ADDR, get_mempolicy(2) stores the node id of
  // the page at 'address' into 'id'. Called via syscall() because libnuma
  // may not be loaded at this point.
  // NOTE(review): for a page that is not yet allocated, get_mempolicy
  // reports the node the page *would* be allocated on, leaving the
  // default id of 0 indistinguishable from a real node 0 — confirm callers
  // tolerate this.
  if (syscall(SYS_get_mempolicy, &id, NULL, 0, const_cast<void*>(address), MPOL_F_NODE | MPOL_F_ADDR) == -1) {
    return -1;
  }
  return id;
}
3026
3027 int os::Linux::get_existing_num_nodes() {
3028 int node;
3029 int highest_node_number = Linux::numa_max_node();
3030 int num_nodes = 0;
3031
3032 // Get the total number of nodes in the system including nodes without memory.
3033 for (node = 0; node <= highest_node_number; node++) {
3034 if (is_node_in_existing_nodes(node)) {
3035 num_nodes++;
3036 }
3037 }
3038 return num_nodes;
3039 }
3040
3041 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3135 set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t,
3136 libnuma_dlsym(handle, "numa_num_configured_nodes")));
3137 set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
3138 libnuma_dlsym(handle, "numa_available")));
3139 set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
3140 libnuma_dlsym(handle, "numa_tonode_memory")));
3141 set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
3142 libnuma_dlsym(handle, "numa_interleave_memory")));
3143 set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t,
3144 libnuma_v2_dlsym(handle, "numa_interleave_memory")));
3145 set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
3146 libnuma_dlsym(handle, "numa_set_bind_policy")));
3147 set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
3148 libnuma_dlsym(handle, "numa_bitmask_isbitset")));
3149 set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
3150 libnuma_dlsym(handle, "numa_distance")));
3151 set_numa_get_membind(CAST_TO_FN_PTR(numa_get_membind_func_t,
3152 libnuma_v2_dlsym(handle, "numa_get_membind")));
3153 set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t,
3154 libnuma_v2_dlsym(handle, "numa_get_interleave_mask")));
3155
3156 if (numa_available() != -1) {
3157 set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
3158 set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
3159 set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
3160 set_numa_interleave_bitmask(_numa_get_interleave_mask());
3161 set_numa_membind_bitmask(_numa_get_membind());
3162 // Create an index -> node mapping, since nodes are not always consecutive
3163 _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
3164 rebuild_nindex_to_node_map();
3165 // Create a cpu -> node mapping
3166 _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
3167 rebuild_cpu_to_node_map();
3168 return true;
3169 }
3170 }
3171 }
3172 return false;
3173 }
3174
3269 return cpu_to_node()->at(cpu_id);
3270 }
3271 return -1;
3272 }
3273
// Definitions of os::Linux NUMA-related static members.
// Mapping tables built after libnuma is loaded (see rebuild_*_map()).
GrowableArray<int>* os::Linux::_cpu_to_node;
GrowableArray<int>* os::Linux::_nindex_to_node;
// Function pointers resolved from libc/libnuma at runtime via dlsym;
// NULL when the symbol (or library) is unavailable.
os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;
os::Linux::numa_available_func_t os::Linux::_numa_available;
os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
os::Linux::numa_distance_func_t os::Linux::_numa_distance;
os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;
os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask;
// Process-wide NUMA allocation policy and node masks captured at init.
os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy;
unsigned long* os::Linux::_numa_all_nodes;
struct bitmask* os::Linux::_numa_all_nodes_ptr;
struct bitmask* os::Linux::_numa_nodes_ptr;
struct bitmask* os::Linux::_numa_interleave_bitmask;
struct bitmask* os::Linux::_numa_membind_bitmask;
3295
3296 bool os::pd_uncommit_memory(char* addr, size_t size) {
3297 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
3298 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
3299 return res != (uintptr_t) MAP_FAILED;
3300 }
3301
3302 static address get_stack_commited_bottom(address bottom, size_t size) {
3303 address nbot = bottom;
3304 address ntop = bottom + size;
3305
3306 size_t page_sz = os::vm_page_size();
3307 unsigned pages = size / page_sz;
3308
|
// Linux provides no notification mechanism for NUMA topology changes,
// so report a static topology to the VM.
bool os::numa_topology_changed() { return false; }
2992
// Number of locality groups (NUMA nodes) the VM can allocate on.
size_t os::numa_get_groups_num() {
  // Return just the number of nodes in which it's possible to allocate memory
  // (in numa terminology, configured nodes).
  return Linux::numa_num_configured_nodes();
}
2998
2999 int os::numa_get_group_id() {
3000 int cpu_id = Linux::sched_getcpu();
3001 if (cpu_id != -1) {
3002 int lgrp_id = Linux::get_node_by_cpu(cpu_id);
3003 if (lgrp_id != -1) {
3004 return lgrp_id;
3005 }
3006 }
3007 return 0;
3008 }
3009
3010 int os::numa_get_group_id_for_address(const void* address) {
3011 void** pages = const_cast<void**>(&address);
3012 int id = -1;
3013
3014 if (os::Linux::numa_move_pages(0, 1, pages, NULL, &id, 0) == -1) {
3015 return -1;
3016 }
3017 if (id < 0) {
3018 return -1;
3019 }
3020 return id;
3021 }
3022
3023 int os::Linux::get_existing_num_nodes() {
3024 int node;
3025 int highest_node_number = Linux::numa_max_node();
3026 int num_nodes = 0;
3027
3028 // Get the total number of nodes in the system including nodes without memory.
3029 for (node = 0; node <= highest_node_number; node++) {
3030 if (is_node_in_existing_nodes(node)) {
3031 num_nodes++;
3032 }
3033 }
3034 return num_nodes;
3035 }
3036
3037 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3131 set_numa_num_configured_nodes(CAST_TO_FN_PTR(numa_num_configured_nodes_func_t,
3132 libnuma_dlsym(handle, "numa_num_configured_nodes")));
3133 set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
3134 libnuma_dlsym(handle, "numa_available")));
3135 set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
3136 libnuma_dlsym(handle, "numa_tonode_memory")));
3137 set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
3138 libnuma_dlsym(handle, "numa_interleave_memory")));
3139 set_numa_interleave_memory_v2(CAST_TO_FN_PTR(numa_interleave_memory_v2_func_t,
3140 libnuma_v2_dlsym(handle, "numa_interleave_memory")));
3141 set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
3142 libnuma_dlsym(handle, "numa_set_bind_policy")));
3143 set_numa_bitmask_isbitset(CAST_TO_FN_PTR(numa_bitmask_isbitset_func_t,
3144 libnuma_dlsym(handle, "numa_bitmask_isbitset")));
3145 set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
3146 libnuma_dlsym(handle, "numa_distance")));
3147 set_numa_get_membind(CAST_TO_FN_PTR(numa_get_membind_func_t,
3148 libnuma_v2_dlsym(handle, "numa_get_membind")));
3149 set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t,
3150 libnuma_v2_dlsym(handle, "numa_get_interleave_mask")));
3151 set_numa_move_pages(CAST_TO_FN_PTR(numa_move_pages_func_t,
3152 libnuma_dlsym(handle, "numa_move_pages")));
3153
3154 if (numa_available() != -1) {
3155 set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
3156 set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
3157 set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
3158 set_numa_interleave_bitmask(_numa_get_interleave_mask());
3159 set_numa_membind_bitmask(_numa_get_membind());
3160 // Create an index -> node mapping, since nodes are not always consecutive
3161 _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
3162 rebuild_nindex_to_node_map();
3163 // Create a cpu -> node mapping
3164 _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
3165 rebuild_cpu_to_node_map();
3166 return true;
3167 }
3168 }
3169 }
3170 return false;
3171 }
3172
3267 return cpu_to_node()->at(cpu_id);
3268 }
3269 return -1;
3270 }
3271
// Definitions of os::Linux NUMA-related static members.
// Mapping tables built after libnuma is loaded (see rebuild_*_map()).
GrowableArray<int>* os::Linux::_cpu_to_node;
GrowableArray<int>* os::Linux::_nindex_to_node;
// Function pointers resolved from libc/libnuma at runtime via dlsym;
// NULL when the symbol (or library) is unavailable.
os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
os::Linux::numa_num_configured_nodes_func_t os::Linux::_numa_num_configured_nodes;
os::Linux::numa_available_func_t os::Linux::_numa_available;
os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
os::Linux::numa_distance_func_t os::Linux::_numa_distance;
os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;
os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask;
os::Linux::numa_move_pages_func_t os::Linux::_numa_move_pages;
// Process-wide NUMA allocation policy and node masks captured at init.
os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy;
unsigned long* os::Linux::_numa_all_nodes;
struct bitmask* os::Linux::_numa_all_nodes_ptr;
struct bitmask* os::Linux::_numa_nodes_ptr;
struct bitmask* os::Linux::_numa_interleave_bitmask;
struct bitmask* os::Linux::_numa_membind_bitmask;
3294
3295 bool os::pd_uncommit_memory(char* addr, size_t size) {
3296 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
3297 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
3298 return res != (uintptr_t) MAP_FAILED;
3299 }
3300
3301 static address get_stack_commited_bottom(address bottom, size_t size) {
3302 address nbot = bottom;
3303 address ntop = bottom + size;
3304
3305 size_t page_sz = os::vm_page_size();
3306 unsigned pages = size / page_sz;
3307
|