src/hotspot/os/linux/os_linux.hpp

rev 56821 : imported patch 8220310.mut.0
rev 56822 : imported patch 8220310.mut.1
rev 56837 : imported patch 8220312.stat.4-move_pages


 199   static jlong fast_thread_cpu_time(clockid_t clockid);
 200 
 201   // Stack repair handling
 202 
 203   // none present
 204 
 205  private:
 206   static void numa_init();
 207   static void expand_stack_to(address bottom);
 208 
 209   typedef int (*sched_getcpu_func_t)(void);
 210   typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
 211   typedef int (*numa_max_node_func_t)(void);
 212   typedef int (*numa_num_configured_nodes_func_t)(void);
 213   typedef int (*numa_available_func_t)(void);
 214   typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
 215   typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
 216   typedef void (*numa_interleave_memory_v2_func_t)(void *start, size_t size, struct bitmask* mask);
 217   typedef struct bitmask* (*numa_get_membind_func_t)(void);
 218   typedef struct bitmask* (*numa_get_interleave_mask_func_t)(void);
 219 
 220   typedef void (*numa_set_bind_policy_func_t)(int policy);
 221   typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n);
 222   typedef int (*numa_distance_func_t)(int node1, int node2);
 223 
 224   static sched_getcpu_func_t _sched_getcpu;
 225   static numa_node_to_cpus_func_t _numa_node_to_cpus;
 226   static numa_max_node_func_t _numa_max_node;
 227   static numa_num_configured_nodes_func_t _numa_num_configured_nodes;
 228   static numa_available_func_t _numa_available;
 229   static numa_tonode_memory_func_t _numa_tonode_memory;
 230   static numa_interleave_memory_func_t _numa_interleave_memory;
 231   static numa_interleave_memory_v2_func_t _numa_interleave_memory_v2;
 232   static numa_set_bind_policy_func_t _numa_set_bind_policy;
 233   static numa_bitmask_isbitset_func_t _numa_bitmask_isbitset;
 234   static numa_distance_func_t _numa_distance;
 235   static numa_get_membind_func_t _numa_get_membind;
 236   static numa_get_interleave_mask_func_t _numa_get_interleave_mask;
 237   static unsigned long* _numa_all_nodes;
 238   static struct bitmask* _numa_all_nodes_ptr;
 239   static struct bitmask* _numa_nodes_ptr;
 240   static struct bitmask* _numa_interleave_bitmask;
 241   static struct bitmask* _numa_membind_bitmask;
 242 
 243   static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
 244   static void set_numa_node_to_cpus(numa_node_to_cpus_func_t func) { _numa_node_to_cpus = func; }
 245   static void set_numa_max_node(numa_max_node_func_t func) { _numa_max_node = func; }
 246   static void set_numa_num_configured_nodes(numa_num_configured_nodes_func_t func) { _numa_num_configured_nodes = func; }
 247   static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
 248   static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
 249   static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
 250   static void set_numa_interleave_memory_v2(numa_interleave_memory_v2_func_t func) { _numa_interleave_memory_v2 = func; }
 251   static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
 252   static void set_numa_bitmask_isbitset(numa_bitmask_isbitset_func_t func) { _numa_bitmask_isbitset = func; }
 253   static void set_numa_distance(numa_distance_func_t func) { _numa_distance = func; }
 254   static void set_numa_get_membind(numa_get_membind_func_t func) { _numa_get_membind = func; }
 255   static void set_numa_get_interleave_mask(numa_get_interleave_mask_func_t func) { _numa_get_interleave_mask = func; }
 256   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
 257   static void set_numa_all_nodes_ptr(struct bitmask **ptr) { _numa_all_nodes_ptr = (ptr == NULL ? NULL : *ptr); }
 258   static void set_numa_nodes_ptr(struct bitmask **ptr) { _numa_nodes_ptr = (ptr == NULL ? NULL : *ptr); }
 259   static void set_numa_interleave_bitmask(struct bitmask* ptr)     { _numa_interleave_bitmask = ptr ;   }
 260   static void set_numa_membind_bitmask(struct bitmask* ptr)        { _numa_membind_bitmask = ptr ;      }
 261   static int sched_getcpu_syscall(void);
 262 
 263   enum NumaAllocationPolicy{
 264     NotInitialized,
 265     Membind,
 266     Interleave
 267   };
 268   static NumaAllocationPolicy _current_numa_policy;
 269 
 270  public:
 271   static int sched_getcpu()  { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
 272   static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
 273     return _numa_node_to_cpus != NULL ? _numa_node_to_cpus(node, buffer, bufferlen) : -1;
 274   }
 275   static int numa_max_node() { return _numa_max_node != NULL ? _numa_max_node() : -1; }


 300 
 301   static void numa_interleave_memory(void *start, size_t size) {
 302     // Prefer v2 API
 303     if (_numa_interleave_memory_v2 != NULL) {
 304       if (is_running_in_interleave_mode()) {
 305         _numa_interleave_memory_v2(start, size, _numa_interleave_bitmask);
 306       } else if (_numa_membind_bitmask != NULL) {
 307         _numa_interleave_memory_v2(start, size, _numa_membind_bitmask);
 308       }
 309     } else if (_numa_interleave_memory != NULL) {
 310       _numa_interleave_memory(start, size, _numa_all_nodes);
 311     }
 312   }
 313   static void numa_set_bind_policy(int policy) {
 314     if (_numa_set_bind_policy != NULL) {
 315       _numa_set_bind_policy(policy);
 316     }
 317   }
 318   static int numa_distance(int node1, int node2) {
 319     return _numa_distance != NULL ? _numa_distance(node1, node2) : -1;
 320   }
 321   static int get_node_by_cpu(int cpu_id);
 322   static int get_existing_num_nodes();
 323   // Check if numa node is configured (non-zero memory node).
 324   static bool is_node_in_configured_nodes(unsigned int n) {
 325     if (_numa_bitmask_isbitset != NULL && _numa_all_nodes_ptr != NULL) {
 326       return _numa_bitmask_isbitset(_numa_all_nodes_ptr, n);
 327     } else
 328       return false;
 329   }
 330   // Check if numa node exists in the system (including zero memory nodes).
 331   static bool is_node_in_existing_nodes(unsigned int n) {
 332     if (_numa_bitmask_isbitset != NULL && _numa_nodes_ptr != NULL) {
 333       return _numa_bitmask_isbitset(_numa_nodes_ptr, n);
 334     } else if (_numa_bitmask_isbitset != NULL && _numa_all_nodes_ptr != NULL) {
 335       // Not all libnuma API v2 implement numa_nodes_ptr, so it's not possible
 336       // to trust the API version for checking its absence. On the other hand,
 337       // numa_nodes_ptr found in libnuma 2.0.9 and above is the only way to get
 338       // a complete view of all numa nodes in the system, hence numa_nodes_ptr
 339       // is used to handle CPU and nodes on architectures (like PowerPC) where

 199   static jlong fast_thread_cpu_time(clockid_t clockid);
 200 
 201   // Stack repair handling
 202 
 203   // none present
 204 
 205  private:
 206   static void numa_init();
 207   static void expand_stack_to(address bottom);
 208 
 209   typedef int (*sched_getcpu_func_t)(void);
 210   typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
 211   typedef int (*numa_max_node_func_t)(void);
 212   typedef int (*numa_num_configured_nodes_func_t)(void);
 213   typedef int (*numa_available_func_t)(void);
 214   typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
 215   typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
 216   typedef void (*numa_interleave_memory_v2_func_t)(void *start, size_t size, struct bitmask* mask);
 217   typedef struct bitmask* (*numa_get_membind_func_t)(void);
 218   typedef struct bitmask* (*numa_get_interleave_mask_func_t)(void);
 219   typedef long (*numa_move_pages_func_t)(int pid, unsigned long count, void **pages, const int *nodes, int *status, int flags);
 220 
 221   typedef void (*numa_set_bind_policy_func_t)(int policy);
 222   typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n);
 223   typedef int (*numa_distance_func_t)(int node1, int node2);
 224 
 225   static sched_getcpu_func_t _sched_getcpu;
 226   static numa_node_to_cpus_func_t _numa_node_to_cpus;
 227   static numa_max_node_func_t _numa_max_node;
 228   static numa_num_configured_nodes_func_t _numa_num_configured_nodes;
 229   static numa_available_func_t _numa_available;
 230   static numa_tonode_memory_func_t _numa_tonode_memory;
 231   static numa_interleave_memory_func_t _numa_interleave_memory;
 232   static numa_interleave_memory_v2_func_t _numa_interleave_memory_v2;
 233   static numa_set_bind_policy_func_t _numa_set_bind_policy;
 234   static numa_bitmask_isbitset_func_t _numa_bitmask_isbitset;
 235   static numa_distance_func_t _numa_distance;
 236   static numa_get_membind_func_t _numa_get_membind;
 237   static numa_get_interleave_mask_func_t _numa_get_interleave_mask;
 238   static numa_move_pages_func_t _numa_move_pages;
 239   static unsigned long* _numa_all_nodes;
 240   static struct bitmask* _numa_all_nodes_ptr;
 241   static struct bitmask* _numa_nodes_ptr;
 242   static struct bitmask* _numa_interleave_bitmask;
 243   static struct bitmask* _numa_membind_bitmask;
 244 
 245   static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
 246   static void set_numa_node_to_cpus(numa_node_to_cpus_func_t func) { _numa_node_to_cpus = func; }
 247   static void set_numa_max_node(numa_max_node_func_t func) { _numa_max_node = func; }
 248   static void set_numa_num_configured_nodes(numa_num_configured_nodes_func_t func) { _numa_num_configured_nodes = func; }
 249   static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
 250   static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
 251   static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
 252   static void set_numa_interleave_memory_v2(numa_interleave_memory_v2_func_t func) { _numa_interleave_memory_v2 = func; }
 253   static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
 254   static void set_numa_bitmask_isbitset(numa_bitmask_isbitset_func_t func) { _numa_bitmask_isbitset = func; }
 255   static void set_numa_distance(numa_distance_func_t func) { _numa_distance = func; }
 256   static void set_numa_get_membind(numa_get_membind_func_t func) { _numa_get_membind = func; }
 257   static void set_numa_get_interleave_mask(numa_get_interleave_mask_func_t func) { _numa_get_interleave_mask = func; }
 258   static void set_numa_move_pages(numa_move_pages_func_t func) { _numa_move_pages = func; }
 259   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
 260   static void set_numa_all_nodes_ptr(struct bitmask **ptr) { _numa_all_nodes_ptr = (ptr == NULL ? NULL : *ptr); }
 261   static void set_numa_nodes_ptr(struct bitmask **ptr) { _numa_nodes_ptr = (ptr == NULL ? NULL : *ptr); }
 262   static void set_numa_interleave_bitmask(struct bitmask* ptr)     { _numa_interleave_bitmask = ptr ;   }
 263   static void set_numa_membind_bitmask(struct bitmask* ptr)        { _numa_membind_bitmask = ptr ;      }
 264   static int sched_getcpu_syscall(void);
 265 
 266   enum NumaAllocationPolicy{
 267     NotInitialized,
 268     Membind,
 269     Interleave
 270   };
 271   static NumaAllocationPolicy _current_numa_policy;
 272 
 273  public:
 274   static int sched_getcpu()  { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
 275   static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
 276     return _numa_node_to_cpus != NULL ? _numa_node_to_cpus(node, buffer, bufferlen) : -1;
 277   }
 278   static int numa_max_node() { return _numa_max_node != NULL ? _numa_max_node() : -1; }


 303 
 304   static void numa_interleave_memory(void *start, size_t size) {
 305     // Prefer v2 API
 306     if (_numa_interleave_memory_v2 != NULL) {
 307       if (is_running_in_interleave_mode()) {
 308         _numa_interleave_memory_v2(start, size, _numa_interleave_bitmask);
 309       } else if (_numa_membind_bitmask != NULL) {
 310         _numa_interleave_memory_v2(start, size, _numa_membind_bitmask);
 311       }
 312     } else if (_numa_interleave_memory != NULL) {
 313       _numa_interleave_memory(start, size, _numa_all_nodes);
 314     }
 315   }
 316   static void numa_set_bind_policy(int policy) {
 317     if (_numa_set_bind_policy != NULL) {
 318       _numa_set_bind_policy(policy);
 319     }
 320   }
 321   static int numa_distance(int node1, int node2) {
 322     return _numa_distance != NULL ? _numa_distance(node1, node2) : -1;
 323   }
 324   static long numa_move_pages(int pid, unsigned long count, void **pages, const int *nodes, int *status, int flags) {
 325     return _numa_move_pages != NULL ? _numa_move_pages(pid, count, pages, nodes, status, flags) : -1;
 326   }
 327   static int get_node_by_cpu(int cpu_id);
 328   static int get_existing_num_nodes();
 329   // Check if numa node is configured (non-zero memory node).
 330   static bool is_node_in_configured_nodes(unsigned int n) {
 331     if (_numa_bitmask_isbitset != NULL && _numa_all_nodes_ptr != NULL) {
 332       return _numa_bitmask_isbitset(_numa_all_nodes_ptr, n);
 333     } else
 334       return false;
 335   }
 336   // Check if numa node exists in the system (including zero memory nodes).
 337   static bool is_node_in_existing_nodes(unsigned int n) {
 338     if (_numa_bitmask_isbitset != NULL && _numa_nodes_ptr != NULL) {
 339       return _numa_bitmask_isbitset(_numa_nodes_ptr, n);
 340     } else if (_numa_bitmask_isbitset != NULL && _numa_all_nodes_ptr != NULL) {
 341       // Not all libnuma API v2 implement numa_nodes_ptr, so it's not possible
 342       // to trust the API version for checking its absence. On the other hand,
 343       // numa_nodes_ptr found in libnuma 2.0.9 and above is the only way to get
 344       // a complete view of all numa nodes in the system, hence numa_nodes_ptr
 345       // is used to handle CPU and nodes on architectures (like PowerPC) where


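Note on the new hook: _numa_move_pages follows the same pattern as the other libnuma entry points in this header. The symbol is resolved from libnuma at run time and the public wrapper falls back to returning -1 when the lookup failed, so the VM keeps working on systems without libnuma. The stand-alone sketch below is not HotSpot code: it assumes a plain dlopen/dlsym lookup (the VM uses its own resolution helpers in os_linux.cpp) and a hypothetical resolve_numa_move_pages() helper, and it shows the query form of numa_move_pages, where passing nodes == NULL reports, per page, the NUMA node that currently backs it. That per-page location information is presumably what the statistics change referenced by 8220312.stat.4-move_pages builds on.

    // Minimal sketch (not HotSpot code): resolve numa_move_pages from libnuma at
    // run time, keep a NULL-guarded wrapper like the one added in this patch, and
    // use the query form (nodes == NULL) to ask which node backs a page.
    // resolve_numa_move_pages() and the direct dlopen of libnuma.so.1 are
    // illustrative assumptions, not the VM's actual lookup path.
    #include <dlfcn.h>
    #include <unistd.h>
    #include <cstdio>
    #include <cstdlib>

    typedef long (*numa_move_pages_func_t)(int pid, unsigned long count, void **pages,
                                           const int *nodes, int *status, int flags);

    static numa_move_pages_func_t _numa_move_pages = NULL;

    static void resolve_numa_move_pages() {
      // Hypothetical resolver; link with -ldl on older glibc.
      void* handle = dlopen("libnuma.so.1", RTLD_LAZY);
      if (handle != NULL) {
        _numa_move_pages = (numa_move_pages_func_t)dlsym(handle, "numa_move_pages");
      }
    }

    // Same shape as the wrapper in os_linux.hpp: return -1 when the symbol is missing.
    static long numa_move_pages(int pid, unsigned long count, void **pages,
                                const int *nodes, int *status, int flags) {
      return _numa_move_pages != NULL
                 ? _numa_move_pages(pid, count, pages, nodes, status, flags)
                 : -1;
    }

    int main() {
      resolve_numa_move_pages();

      // Allocate and touch one page so it is backed by physical memory.
      long page_size = sysconf(_SC_PAGESIZE);
      void* mem = aligned_alloc((size_t)page_size, (size_t)page_size);
      if (mem == NULL) return 1;
      ((char*)mem)[0] = 1;

      // nodes == NULL turns numa_move_pages into a query: status[i] receives the
      // node holding pages[i], or a negative errno-style value.
      void* pages[1]  = { mem };
      int   status[1] = { -1 };
      long rc = numa_move_pages(0 /* current process */, 1, pages, NULL, status, 0);
      if (rc == 0) {
        printf("page %p is on NUMA node %d\n", mem, status[0]);
      } else {
        printf("numa_move_pages unavailable or failed (rc=%ld)\n", rc);
      }
      free(mem);
      return 0;
    }

Returning -1 from the wrapper instead of asserting keeps NUMA support strictly optional: callers can treat a missing symbol the same way as a failed call.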