
src/hotspot/share/runtime/os.hpp

rev 50554 : webrev.02


 211   // information may require a lock on some platforms.
 212   static char*      local_time_string(char *buf, size_t buflen);
 213   static struct tm* localtime_pd     (const time_t* clock, struct tm*  res);
 214   static struct tm* gmtime_pd        (const time_t* clock, struct tm*  res);
 215   // Fill in buffer with current local time as an ISO-8601 string.
 216   // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz.
 217   // Returns the buffer, or NULL on failure.
 218   static char* iso8601_time(char* buffer, size_t buffer_length, bool utc = false);
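As a usage illustration (not from this file), a minimal sketch; the 29-byte minimum is derived from the documented format string, and tty is HotSpot's usual output stream:

    // Sketch: format the current local time per the comment above.
    // "YYYY-MM-DDThh:mm:ss.mmm+zzzz" is 28 characters plus a NUL, so
    // any buffer of at least 29 bytes is sufficient.
    char buf[32];
    if (os::iso8601_time(buf, sizeof(buf)) != NULL) {
      tty->print_cr("timestamp: %s", buf);
    }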
 219 
 220   // Interface for detecting a multiprocessor system
 221   static inline bool is_MP() {
 222     // During bootstrap, if _processor_count is not yet initialized, we
 223     // claim to be MP, as that is the safest assumption. If any platform
 224     // has a stub generator that might be triggered in this phase, and
 225     // for which being declared MP when in fact not is a problem, then
 226     // the bootstrap routine for the stub generator needs to check the
 227     // processor count directly and leave the bootstrap routine in place
 228     // until it is called after initialization has occurred.
 229     return AssumeMP || (_processor_count != 1);
 230   }
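For context, a hedged sketch of the kind of caller this serves; the fence example is hypothetical and not taken from this file:

    // Sketch: multiprocessor-only work can be skipped on uniprocessors.
    if (os::is_MP()) {
      OrderAccess::fence();   // e.g. a hardware memory barrier
    }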
 231   static julong available_memory();
 232   static julong physical_memory();
 233   static bool has_allocatable_memory_limit(julong* limit);
 234   static bool is_server_class_machine();
 235 
 236   // Returns the id of the processor on which the calling thread is currently executing.
 237   // The returned value is guaranteed to be between 0 and (os::processor_count() - 1).
 238   static uint processor_id();
 239 
 240   // number of CPUs
 241   static int processor_count() {
 242     return _processor_count;
 243   }
 244   static void set_processor_count(int count) { _processor_count = count; }
 245 
 246   // Returns the number of CPUs this process is currently allowed to run on.
 247   // Note that on some OSes this can change dynamically.
 248   static int active_processor_count();
 249 
 250   // The number of active CPUs this process was allowed to run on at startup.
 251   // This value does not change dynamically. May differ from active_processor_count().
 252   static int initial_active_processor_count() {
 253     assert(_initial_active_processor_count > 0, "Initial active processor count not set yet.");
 254     return _initial_active_processor_count;
 255   }
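A small sketch of the intended distinction, assuming a caller that sizes a fixed structure once but polls the dynamic count later:

    // Sketch: the startup snapshot is stable, the active count is not.
    int startup_cpus = os::initial_active_processor_count(); // fixed after init
    int current_cpus = os::active_processor_count();         // may change later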
 256 
 257   // Bind processes to processors.
 258   //     This is a two-step procedure:
 259   //     first you generate a distribution of processes to processors,
 260   //     then you bind processes according to that distribution.
 261   // Compute a distribution of processes to processors.
 262   //    Stores the processor ids into the distribution array argument.
 263   //    Returns true on success, false on failure.
 264   static bool distribute_processes(uint length, uint* distribution);
 265   // Binds the current process to a processor.
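A worked sketch of the two-step procedure described above, assuming the bind_to_processor(uint) declaration that this comment introduces (elided here); error handling is omitted:

    // Step 1: compute a distribution of 4 processes over processors.
    uint dist[4];
    if (os::distribute_processes(4, dist)) {
      // Step 2: each process binds itself to its assigned slot; shown
      // here for the process occupying slot 0.
      os::bind_to_processor(dist[0]);
    }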


 314                                const size_t page_size,
 315                                const char* base,
 316                                const size_t size);
 317   static void trace_page_sizes_for_requested_size(const char* str,
 318                                                   const size_t requested_size,
 319                                                   const size_t page_size,
 320                                                   const size_t alignment,
 321                                                   const char* base,
 322                                                   const size_t size);
 323 
 324   static int    vm_allocation_granularity();
 325   static char*  reserve_memory(size_t bytes, char* addr = 0,
 326                                size_t alignment_hint = 0, int file_desc = -1);
 327   static char*  reserve_memory(size_t bytes, char* addr,
 328                                size_t alignment_hint, MEMFLAGS flags);
 329   static char*  reserve_memory_aligned(size_t size, size_t alignment, int file_desc = -1);
 330   static char*  attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc = -1);
 331   static void   split_reserved_memory(char *base, size_t size,
 332                                       size_t split, bool realloc);
 333   static bool   commit_memory(char* addr, size_t bytes, bool executable);
 334   static bool   commit_memory(char* addr, size_t size, size_t alignment_hint,
 335                               bool executable);
 336   // Same as commit_memory(), except that it either succeeds or calls
 337   // vm_exit_out_of_memory() with the specified mesg.
 338   static void   commit_memory_or_exit(char* addr, size_t bytes,
 339                                       bool executable, const char* mesg);
 340   static void   commit_memory_or_exit(char* addr, size_t size,
 341                                       size_t alignment_hint,
 342                                       bool executable, const char* mesg);
 343   static bool   uncommit_memory(char* addr, size_t bytes);
 344   static bool   release_memory(char* addr, size_t bytes);
 345 
 346   // Touch memory pages that cover the memory range from start to end (exclusive)
 347   // to make the OS back the memory range with actual memory.
 348   // The current implementation may not touch the last page if unaligned
 349   // addresses are passed.
 350   static void   pretouch_memory(void* start, void* end, size_t page_size = vm_page_size());
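Tying the primitives above together, a minimal lifecycle sketch (sizes are illustrative; real callers check every return value and use the MEMFLAGS overloads for NMT tracking):

    // Sketch: reserve -> commit -> pretouch -> uncommit -> release.
    size_t bytes = 64 * M;                         // M: HotSpot megabyte constant
    char* base = os::reserve_memory(bytes);        // address space only
    if (base != NULL &&
        os::commit_memory(base, bytes, false /*executable*/)) {
      os::pretouch_memory(base, base + bytes);     // fault in backing pages now
      // ... use the memory ...
      os::uncommit_memory(base, bytes);
    }
    if (base != NULL) {
      os::release_memory(base, bytes);
    }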
 351 
 352   enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
 353   static bool   protect_memory(char* addr, size_t bytes, ProtType prot,
 354                                bool is_committed = true);
 355 
 356   static bool   guard_memory(char* addr, size_t bytes);
 357   static bool   unguard_memory(char* addr, size_t bytes);
 358   static bool   create_stack_guard_pages(char* addr, size_t bytes);
 359   static bool   pd_create_stack_guard_pages(char* addr, size_t bytes);
 360   static bool   remove_stack_guard_pages(char* addr, size_t bytes);
 361   // Helper function to create a new file with the template jvmheap.XXXXXX.
 362   // Returns a valid fd on success, or -1 on failure.
 363   static int create_file_for_heap(const char* dir);
 364   // Map memory to the file referred to by fd. This function is slightly different from map_memory()
 365   // and is used to implement -XX:AllocateHeapAt.
 366   static char* map_memory_to_file(char* base, size_t size, int fd);
 367   // Replace existing reserved memory with a file mapping.
 368   static char* replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd);
 369 
 370   static char*  map_memory(int fd, const char* file_name, size_t file_offset,
 371                            char *addr, size_t bytes, bool read_only = false,
 372                            bool allow_exec = false);
 373   static char*  remap_memory(int fd, const char* file_name, size_t file_offset,
 374                              char *addr, size_t bytes, bool read_only,
 375                              bool allow_exec);
 376   static bool   unmap_memory(char *addr, size_t bytes);
 377   static void   free_memory(char *addr, size_t bytes, size_t alignment_hint);
 378   static void   realign_memory(char *addr, size_t bytes, size_t alignment_hint);
 379 
 380   // NUMA-specific interface
 381   static bool   numa_has_static_binding();
 382   static bool   numa_has_group_homing();
 383   static void   numa_make_local(char *addr, size_t bytes, int lgrp_hint);
 384   static void   numa_make_global(char *addr, size_t bytes);
 385   static size_t numa_get_groups_num();
 386   static size_t numa_get_leaf_groups(int *ids, size_t size);


1000     }
1001 
1002     bool is_suspended() const {
1003       return _state == SR_SUSPENDED;
1004     }
1005   };
 1006 #endif // !_WINDOWS
1007 
1008 
1009  protected:
1010   static volatile unsigned int _rand_seed;    // seed for random number generator
1011   static int _processor_count;                // number of processors
1012   static int _initial_active_processor_count; // number of active processors during initialization.
1013 
1014   static char* format_boot_path(const char* format_string,
1015                                 const char* home,
1016                                 int home_len,
1017                                 char fileSep,
1018                                 char pathSep);
1019   static bool set_boot_path(char fileSep, char pathSep);
1020 
1021 };
1022 
1023 #ifndef _WINDOWS
1024 template<> struct IsRegisteredEnum<os::SuspendResume::State> : public TrueType {};
1025 #endif // !_WINDOWS
1026 
1027 // Note that "PAUSE" is almost always used with synchronization
1028 // so arguably we should provide Atomic::SpinPause() instead
1029 // of the global SpinPause() with C linkage.
1030 // It'd also be eligible for inlining on many platforms.
1031 
1032 extern "C" int SpinPause();
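A typical spin-wait shape for context (the shared flag is hypothetical):

    // Sketch: bounded busy-wait on a shared flag before falling back to
    // a blocking path.
    extern volatile int _ready;                  // hypothetical shared flag
    for (int spins = 1000; _ready == 0 && spins > 0; spins--) {
      SpinPause();                               // CPU pause hint in the loop
    }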
1033 
1034 #endif // SHARE_VM_RUNTIME_OS_HPP


 211   // information may require a lock on some platforms.
 212   static char*      local_time_string(char *buf, size_t buflen);
 213   static struct tm* localtime_pd     (const time_t* clock, struct tm*  res);
 214   static struct tm* gmtime_pd        (const time_t* clock, struct tm*  res);
 215   // Fill in buffer with current local time as an ISO-8601 string.
 216   // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz.
 217   // Returns the buffer, or NULL on failure.
 218   static char* iso8601_time(char* buffer, size_t buffer_length, bool utc = false);
 219 
 220   // Interface for detecting a multiprocessor system
 221   static inline bool is_MP() {
 222     // During bootstrap, if _processor_count is not yet initialized, we
 223     // claim to be MP, as that is the safest assumption. If any platform
 224     // has a stub generator that might be triggered in this phase, and
 225     // for which being declared MP when in fact not is a problem, then
 226     // the bootstrap routine for the stub generator needs to check the
 227     // processor count directly and leave the bootstrap routine in place
 228     // until it is called after initialization has occurred.
 229     return AssumeMP || (_processor_count != 1);
 230   }
 231   static inline bool has_nvdimm() {
 232     // This is set AFTER memory has been successfully mapped on the
 233     // NVDIMM's DAX filesystem.
 234     return _nvdimm_present;
 235   }
 236   static inline int nvdimm_fd() {
 237     // ParallelOldGC adaptive sizing requires the NVDIMM fd.
 238     return _nvdimm_fd;
 239   }
 240   static inline address dram_heapbase() {
 241     return _dram_heap_base;
 242   }
 243   static inline address nvdimm_heapbase() {
 244     return _nvdimm_heap_base;
 245   }
 246   static inline uint nvdimm_regionlength() {
 247     return _nvdimm_region_length;
 248   }
 249   static julong available_memory();
 250   static julong physical_memory();
 251   static bool has_allocatable_memory_limit(julong* limit);
 252   static bool is_server_class_machine();
 253 
 254   // Returns the id of the processor on which the calling thread is currently executing.
 255   // The returned value is guaranteed to be between 0 and (os::processor_count() - 1).
 256   static uint processor_id();
 257 
 258   // number of CPUs
 259   static int processor_count() {
 260     return _processor_count;
 261   }
 262   static void set_processor_count(int count) { _processor_count = count; }
 263 
 264   static void set_nvdimm_present(bool status) { _nvdimm_present = status; }
 265   static void set_nvdimm_fd(int fd) { _nvdimm_fd = fd; }
 266   static void set_dram_heapbase(address base) {_dram_heap_base = base; }
 267   static void set_nvdimm_heapbase(address base) {_nvdimm_heap_base = base; }
 268   static void set_nvdimm_regionlength(uint length) {_nvdimm_region_length = length; }
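A hedged sketch of how the setters and getters pair up; the helper and its arguments are hypothetical, standing in for the heap-mapping code that would call them:

    // Hypothetical helper: record the layout once the heap file on the
    // NVDIMM's DAX filesystem has been mapped successfully.
    static void record_nvdimm_layout(int fd, address dram_base,
                                     address nvdimm_base, uint region_len) {
      os::set_nvdimm_present(true);
      os::set_nvdimm_fd(fd);              // consumed by ParallelOldGC sizing
      os::set_dram_heapbase(dram_base);
      os::set_nvdimm_heapbase(nvdimm_base);
      os::set_nvdimm_regionlength(region_len);
    }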
 269 
 270   // Returns the number of CPUs this process is currently allowed to run on.
 271   // Note that on some OSes this can change dynamically.
 272   static int active_processor_count();
 273 
 274   // The number of active CPUs this process was allowed to run on at startup.
 275   // This value does not change dynamically. May differ from active_processor_count().
 276   static int initial_active_processor_count() {
 277     assert(_initial_active_processor_count > 0, "Initial active processor count not set yet.");
 278     return _initial_active_processor_count;
 279   }
 280 
 281   // Bind processes to processors.
 282   //     This is a two-step procedure:
 283   //     first you generate a distribution of processes to processors,
 284   //     then you bind processes according to that distribution.
 285   // Compute a distribution of processes to processors.
 286   //    Stores the processor ids into the distribution array argument.
 287   //    Returns true on success, false on failure.
 288   static bool distribute_processes(uint length, uint* distribution);
 289   // Binds the current process to a processor.


 338                                const size_t page_size,
 339                                const char* base,
 340                                const size_t size);
 341   static void trace_page_sizes_for_requested_size(const char* str,
 342                                                   const size_t requested_size,
 343                                                   const size_t page_size,
 344                                                   const size_t alignment,
 345                                                   const char* base,
 346                                                   const size_t size);
 347 
 348   static int    vm_allocation_granularity();
 349   static char*  reserve_memory(size_t bytes, char* addr = 0,
 350                                size_t alignment_hint = 0, int file_desc = -1);
 351   static char*  reserve_memory(size_t bytes, char* addr,
 352                                size_t alignment_hint, MEMFLAGS flags);
 353   static char*  reserve_memory_aligned(size_t size, size_t alignment, int file_desc = -1);
 354   static char*  attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc = -1);
 355   static void   split_reserved_memory(char *base, size_t size,
 356                                       size_t split, bool realloc);
 357   static bool   commit_memory(char* addr, size_t bytes, bool executable);
 358   static bool   commit_memory(char* addr, size_t bytes, bool executable, int file_desc, size_t offset = 0);
 359   static bool   commit_memory(char* addr, size_t size, size_t alignment_hint,
 360                               bool executable);
 361   static bool   commit_memory(char* addr, size_t size, size_t alignment_hint,
 362                               bool executable, int file_desc, size_t offset = 0);
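A minimal sketch of the new file-backed overload; the helper is hypothetical, and the offset semantics (byte offset into the heap file) are an assumption based on the parameter name:

    // Hypothetical helper: commit 'bytes' at 'base', backed by the heap
    // file 'fd' starting at byte offset 'off'.
    static bool commit_file_backed(char* base, size_t bytes,
                                   int fd, size_t off) {
      return os::commit_memory(base, bytes, false /*executable*/, fd, off);
    }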
 363   // Same as commit_memory(), except that it either succeeds or calls
 364   // vm_exit_out_of_memory() with the specified mesg.
 365   static void   commit_memory_or_exit(char* addr, size_t bytes,
 366                                       bool executable, const char* mesg);
 367   static void   commit_memory_or_exit(char* addr, size_t size,
 368                                       size_t alignment_hint,
 369                                       bool executable, const char* mesg);
 370   static bool   uncommit_memory(char* addr, size_t bytes);
 371   static bool   release_memory(char* addr, size_t bytes);
 372 
 373   // Touch memory pages that cover the memory range from start to end (exclusive)
 374   // to make the OS back the memory range with actual memory.
 375   // The current implementation may not touch the last page if unaligned
 376   // addresses are passed.
 377   static void   pretouch_memory(void* start, void* end, size_t page_size = vm_page_size());
 378 
 379   enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
 380   static bool   protect_memory(char* addr, size_t bytes, ProtType prot,
 381                                bool is_committed = true);
 382 
 383   static bool   guard_memory(char* addr, size_t bytes);
 384   static bool   unguard_memory(char* addr, size_t bytes);
 385   static bool   create_stack_guard_pages(char* addr, size_t bytes);
 386   static bool   pd_create_stack_guard_pages(char* addr, size_t bytes);
 387   static bool   remove_stack_guard_pages(char* addr, size_t bytes);
 388   // Helper function to create a new file with the template jvmheap.XXXXXX.
 389   // Returns a valid fd on success, or -1 on failure.
 390   static int create_file_for_heap(const char* dir);
 391   // Map memory to the file referred to by fd. This function is slightly different from map_memory()
 392   // and is used to implement -XX:AllocateHeapAt.
 393   static char* map_memory_to_file(char* base, size_t size, int fd, int offset = 0, bool exec = false, bool allocate = true);
 394   static int   allocate_file(int file_desc, size_t size);
 395   // Replace existing reserved memory with a file mapping.
 396   static char* replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd);
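Putting these pieces together, a hedged sketch of the -XX:AllocateHeapAt-style flow; the helper is hypothetical, and relying on the default arguments (offset 0, non-executable, allocate = true) assumes the mapping step sizes the file itself:

    // Hypothetical helper: create the backing file in 'dir' and map the
    // heap onto it at 'requested_base'.
    static char* map_heap_at(const char* dir, char* requested_base,
                             size_t size) {
      int fd = os::create_file_for_heap(dir);   // creates jvmheap.XXXXXX, or -1
      if (fd == -1) {
        return NULL;
      }
      return os::map_memory_to_file(requested_base, size, fd);
    }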
 397 
 398   static char*  map_memory(int fd, const char* file_name, size_t file_offset,
 399                            char *addr, size_t bytes, bool read_only = false,
 400                            bool allow_exec = false);
 401   static char*  remap_memory(int fd, const char* file_name, size_t file_offset,
 402                              char *addr, size_t bytes, bool read_only,
 403                              bool allow_exec);
 404   static bool   unmap_memory(char *addr, size_t bytes);
 405   static void   free_memory(char *addr, size_t bytes, size_t alignment_hint);
 406   static void   realign_memory(char *addr, size_t bytes, size_t alignment_hint);
 407 
 408   // NUMA-specific interface
 409   static bool   numa_has_static_binding();
 410   static bool   numa_has_group_homing();
 411   static void   numa_make_local(char *addr, size_t bytes, int lgrp_hint);
 412   static void   numa_make_global(char *addr, size_t bytes);
 413   static size_t numa_get_groups_num();
 414   static size_t numa_get_leaf_groups(int *ids, size_t size);


1028     }
1029 
1030     bool is_suspended() const {
1031       return _state == SR_SUSPENDED;
1032     }
1033   };
 1034 #endif // !_WINDOWS
1035 
1036 
1037  protected:
1038   static volatile unsigned int _rand_seed;    // seed for random number generator
1039   static int _processor_count;                // number of processors
1040   static int _initial_active_processor_count; // number of active processors during initialization.
1041 
1042   static char* format_boot_path(const char* format_string,
1043                                 const char* home,
1044                                 int home_len,
1045                                 char fileSep,
1046                                 char pathSep);
1047   static bool set_boot_path(char fileSep, char pathSep);
1048   static bool _nvdimm_present;
1049   static int _nvdimm_fd;
1050   static address _dram_heap_base;
1051   static address _nvdimm_heap_base;
1052   static uint _nvdimm_region_length;
1053 };
1054 
1055 #ifndef _WINDOWS
1056 template<> struct IsRegisteredEnum<os::SuspendResume::State> : public TrueType {};
1057 #endif // !_WINDOWS
1058 
1059 // Note that "PAUSE" is almost always used with synchronization
1060 // so arguably we should provide Atomic::SpinPause() instead
1061 // of the global SpinPause() with C linkage.
1062 // It'd also be eligible for inlining on many platforms.
1063 
1064 extern "C" int SpinPause();
1065 
1066 #endif // SHARE_VM_RUNTIME_OS_HPP