93 public:
94 enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
95
96 private:
// Process-global OS state. NOTE(review): the names suggest the initial VM
// thread, a polling page, and a memory-serialization page/mask, but their
// exact semantics are established elsewhere -- confirm against os.cpp.
97 static OSThread* _starting_thread;
98 static address _polling_page;
99 static volatile int32_t * _mem_serialize_page;
100 static uintptr_t _serialize_page_mask;
101 public:
// Zero-terminated list of supported page sizes; capacity is page_sizes_max
// (8 usable entries plus the 0 sentinel, per the enum comment above).
102 static size_t _page_sizes[page_sizes_max];
103
104 private:
// Seed _page_sizes with the platform default page size followed by the 0
// sentinel. Presumably platform init code appends large-page sizes later
// (there is room for 8 entries) -- confirm against the OS-specific code.
105 static void init_page_sizes(size_t default_page_size) {
106 _page_sizes[0] = default_page_size;
107 _page_sizes[1] = 0; // sentinel
108 }
109
// Platform-dependent (pd_) virtual-memory primitives. These back the public
// reserve/commit/uncommit/release API declared later in this class and are
// implemented once per OS port.
110 static char* pd_reserve_memory(size_t bytes, char* addr = 0,
111 size_t alignment_hint = 0);
// Try to reserve memory at exactly 'addr'; returns NULL-style failure
// semantics per the public attempt_reserve_memory_at() wrapper -- confirm
// the contract in the platform implementations.
112 static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr);
113 static void pd_split_reserved_memory(char *base, size_t size,
114 size_t split, bool realloc);
115 static bool pd_commit_memory(char* addr, size_t bytes, bool executable);
116 static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
117 bool executable);
118 // Same as pd_commit_memory() that either succeeds or calls
119 // vm_exit_out_of_memory() with the specified mesg.
120 static void pd_commit_memory_or_exit(char* addr, size_t bytes,
121 bool executable, const char* mesg);
122 static void pd_commit_memory_or_exit(char* addr, size_t size,
123 size_t alignment_hint,
124 bool executable, const char* mesg);
125 static bool pd_uncommit_memory(char* addr, size_t bytes);
126 static bool pd_release_memory(char* addr, size_t bytes);
127
// Platform-dependent file mapping; backs the public map_memory() below.
128 static char* pd_map_memory(int fd, const char* file_name, size_t file_offset,
129 char *addr, size_t bytes, bool read_only = false,
130 bool allow_exec = false);
131 static char* pd_remap_memory(int fd, const char* file_name, size_t file_offset,
132 char *addr, size_t bytes, bool read_only,
// Logging helpers that report which page size was chosen for a region.
285 // The region_{min,max}_size parameters should be the values
286 // passed to page_size_for_region() and page_size should be the result of that
287 // call. The (optional) base and size parameters should come from the
288 // ReservedSpace base() and size() methods.
289 static void trace_page_sizes(const char* str, const size_t* page_sizes, int count);
290 static void trace_page_sizes(const char* str,
291 const size_t region_min_size,
292 const size_t region_max_size,
293 const size_t page_size,
294 const char* base,
295 const size_t size);
296 static void trace_page_sizes_for_requested_size(const char* str,
297 const size_t requested_size,
298 const size_t page_size,
299 const size_t alignment,
300 const char* base,
301 const size_t size);
302
303 static int vm_allocation_granularity();
// Public virtual-memory API: reserve -> commit -> (uncommit) -> release.
// These wrap the platform-dependent pd_* primitives declared above; the
// MEMFLAGS overload additionally records the allocation for NMT tracking
// (presumably -- confirm in os.cpp).
304 static char* reserve_memory(size_t bytes, char* addr = 0,
305 size_t alignment_hint = 0);
306 static char* reserve_memory(size_t bytes, char* addr,
307 size_t alignment_hint, MEMFLAGS flags);
308 static char* reserve_memory_aligned(size_t size, size_t alignment);
309 static char* attempt_reserve_memory_at(size_t bytes, char* addr);
310 static void split_reserved_memory(char *base, size_t size,
311 size_t split, bool realloc);
312 static bool commit_memory(char* addr, size_t bytes, bool executable);
313 static bool commit_memory(char* addr, size_t size, size_t alignment_hint,
314 bool executable);
315 // Same as commit_memory() that either succeeds or calls
316 // vm_exit_out_of_memory() with the specified mesg.
317 static void commit_memory_or_exit(char* addr, size_t bytes,
318 bool executable, const char* mesg);
319 static void commit_memory_or_exit(char* addr, size_t size,
320 size_t alignment_hint,
321 bool executable, const char* mesg);
322 static bool uncommit_memory(char* addr, size_t bytes);
323 static bool release_memory(char* addr, size_t bytes);
324
325 // Touch memory pages that cover the memory range from start to end (exclusive)
326 // to make the OS back the memory range with actual memory.
327 // Current implementation may not touch the last page if unaligned addresses
328 // are passed.
329 static void pretouch_memory(void* start, void* end, size_t page_size = vm_page_size());
330
// Page-protection API (e.g. for guard pages and the polling page).
331 enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
332 static bool protect_memory(char* addr, size_t bytes, ProtType prot,
333 bool is_committed = true);
334
335 static bool guard_memory(char* addr, size_t bytes);
336 static bool unguard_memory(char* addr, size_t bytes);
337 static bool create_stack_guard_pages(char* addr, size_t bytes);
338 static bool pd_create_stack_guard_pages(char* addr, size_t bytes);
339 static bool remove_stack_guard_pages(char* addr, size_t bytes);
340
// File-mapping API; wraps pd_map_memory()/pd_remap_memory().
341 static char* map_memory(int fd, const char* file_name, size_t file_offset,
342 char *addr, size_t bytes, bool read_only = false,
343 bool allow_exec = false);
344 static char* remap_memory(int fd, const char* file_name, size_t file_offset,
345 char *addr, size_t bytes, bool read_only,
346 bool allow_exec);
347 static bool unmap_memory(char *addr, size_t bytes);
348 static void free_memory(char *addr, size_t bytes, size_t alignment_hint);
349 static void realign_memory(char *addr, size_t bytes, size_t alignment_hint);
350
351 // NUMA-specific interface
352 static bool numa_has_static_binding();
353 static bool numa_has_group_homing();
354 static void numa_make_local(char *addr, size_t bytes, int lgrp_hint);
355 static void numa_make_global(char *addr, size_t bytes);
356 static size_t numa_get_groups_num();
357 static size_t numa_get_leaf_groups(int *ids, size_t size);
358 static bool numa_topology_changed();
359 static int numa_get_group_id();
|
93 public:
94 enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
95
96 private:
// Process-global OS state. NOTE(review): the names suggest the initial VM
// thread, a polling page, and a memory-serialization page/mask, but their
// exact semantics are established elsewhere -- confirm against os.cpp.
97 static OSThread* _starting_thread;
98 static address _polling_page;
99 static volatile int32_t * _mem_serialize_page;
100 static uintptr_t _serialize_page_mask;
101 public:
// Zero-terminated list of supported page sizes; capacity is page_sizes_max
// (8 usable entries plus the 0 sentinel, per the enum comment above).
102 static size_t _page_sizes[page_sizes_max];
103
104 private:
// Seed _page_sizes with the platform default page size followed by the 0
// sentinel. Presumably platform init code appends large-page sizes later
// (there is room for 8 entries) -- confirm against the OS-specific code.
105 static void init_page_sizes(size_t default_page_size) {
106 _page_sizes[0] = default_page_size;
107 _page_sizes[1] = 0; // sentinel
108 }
109
// Platform-dependent (pd_) virtual-memory primitives. These back the public
// reserve/commit/uncommit/release API declared later in this class and are
// implemented once per OS port.
110 static char* pd_reserve_memory(size_t bytes, char* addr = 0,
111 size_t alignment_hint = 0);
112 static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr);
// Overload taking a file descriptor: reserve at 'addr' backed by the file
// referred to by file_desc (supports file-backed heaps; see the
// -XX:AllocateHeapAt helpers declared further below).
113 static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc);
114 static void pd_split_reserved_memory(char *base, size_t size,
115 size_t split, bool realloc);
116 static bool pd_commit_memory(char* addr, size_t bytes, bool executable);
117 static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
118 bool executable);
119 // Same as pd_commit_memory() that either succeeds or calls
120 // vm_exit_out_of_memory() with the specified mesg.
121 static void pd_commit_memory_or_exit(char* addr, size_t bytes,
122 bool executable, const char* mesg);
123 static void pd_commit_memory_or_exit(char* addr, size_t size,
124 size_t alignment_hint,
125 bool executable, const char* mesg);
126 static bool pd_uncommit_memory(char* addr, size_t bytes);
127 static bool pd_release_memory(char* addr, size_t bytes);
128
// Platform-dependent file mapping; backs the public map_memory() below.
129 static char* pd_map_memory(int fd, const char* file_name, size_t file_offset,
130 char *addr, size_t bytes, bool read_only = false,
131 bool allow_exec = false);
132 static char* pd_remap_memory(int fd, const char* file_name, size_t file_offset,
133 char *addr, size_t bytes, bool read_only,
// Logging helpers that report which page size was chosen for a region.
286 // The region_{min,max}_size parameters should be the values
287 // passed to page_size_for_region() and page_size should be the result of that
288 // call. The (optional) base and size parameters should come from the
289 // ReservedSpace base() and size() methods.
290 static void trace_page_sizes(const char* str, const size_t* page_sizes, int count);
291 static void trace_page_sizes(const char* str,
292 const size_t region_min_size,
293 const size_t region_max_size,
294 const size_t page_size,
295 const char* base,
296 const size_t size);
297 static void trace_page_sizes_for_requested_size(const char* str,
298 const size_t requested_size,
299 const size_t page_size,
300 const size_t alignment,
301 const char* base,
302 const size_t size);
303
304 static int vm_allocation_granularity();
// Public virtual-memory API: reserve -> commit -> (uncommit) -> release.
// These wrap the platform-dependent pd_* primitives declared above. The
// file_desc parameters (default -1 = anonymous memory, i.e. the historical
// behavior) request memory backed by an open file, used to place the Java
// heap on an alternative device (-XX:AllocateHeapAt; see the heap-file
// helpers below). NOTE(review): the MEMFLAGS overload did not gain a
// file_desc parameter -- confirm that is intentional.
305 static char* reserve_memory(size_t bytes, char* addr = 0,
306 size_t alignment_hint = 0, int file_desc = -1);
307 static char* reserve_memory(size_t bytes, char* addr,
308 size_t alignment_hint, MEMFLAGS flags);
309 static char* reserve_memory_aligned(size_t size, size_t alignment, int file_desc = -1);
310 static char* attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc = -1);
311 static void split_reserved_memory(char *base, size_t size,
312 size_t split, bool realloc);
313 static bool commit_memory(char* addr, size_t bytes, bool executable);
314 static bool commit_memory(char* addr, size_t size, size_t alignment_hint,
315 bool executable);
316 // Same as commit_memory() that either succeeds or calls
317 // vm_exit_out_of_memory() with the specified mesg.
318 static void commit_memory_or_exit(char* addr, size_t bytes,
319 bool executable, const char* mesg);
320 static void commit_memory_or_exit(char* addr, size_t size,
321 size_t alignment_hint,
322 bool executable, const char* mesg);
323 static bool uncommit_memory(char* addr, size_t bytes);
324 static bool release_memory(char* addr, size_t bytes);
325
326 // Touch memory pages that cover the memory range from start to end (exclusive)
327 // to make the OS back the memory range with actual memory.
328 // Current implementation may not touch the last page if unaligned addresses
329 // are passed.
330 static void pretouch_memory(void* start, void* end, size_t page_size = vm_page_size());
331
// Page-protection API (e.g. for guard pages and the polling page).
332 enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
333 static bool protect_memory(char* addr, size_t bytes, ProtType prot,
334 bool is_committed = true);
335
336 static bool guard_memory(char* addr, size_t bytes);
337 static bool unguard_memory(char* addr, size_t bytes);
338 static bool create_stack_guard_pages(char* addr, size_t bytes);
339 static bool pd_create_stack_guard_pages(char* addr, size_t bytes);
340 static bool remove_stack_guard_pages(char* addr, size_t bytes);
// Helpers for file-backed Java heaps (-XX:AllocateHeapAt). Ownership note:
// the fd returned by create_file_for_heap() is passed to the file_desc
// parameters above; confirm in os.cpp who is responsible for closing it.
341 // Helper function to create a new file with template jvmheap.XXXXXX.
342 // Returns a valid fd on success or else returns -1
343 static int create_file_for_heap(const char* dir);
344 // Map memory to the file referred by fd. This function is slightly different from map_memory()
345 // and is added to be used for implementation of -XX:AllocateHeapAt
346 static char* map_memory_to_file(char* base, size_t size, int fd);
347 // Replace existing reserved memory with file mapping
348 static char* replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd);
349
// File-mapping API; wraps pd_map_memory()/pd_remap_memory().
350 static char* map_memory(int fd, const char* file_name, size_t file_offset,
351 char *addr, size_t bytes, bool read_only = false,
352 bool allow_exec = false);
353 static char* remap_memory(int fd, const char* file_name, size_t file_offset,
354 char *addr, size_t bytes, bool read_only,
355 bool allow_exec);
356 static bool unmap_memory(char *addr, size_t bytes);
357 static void free_memory(char *addr, size_t bytes, size_t alignment_hint);
358 static void realign_memory(char *addr, size_t bytes, size_t alignment_hint);
359
360 // NUMA-specific interface
361 static bool numa_has_static_binding();
362 static bool numa_has_group_homing();
363 static void numa_make_local(char *addr, size_t bytes, int lgrp_hint);
364 static void numa_make_global(char *addr, size_t bytes);
365 static size_t numa_get_groups_num();
366 static size_t numa_get_leaf_groups(int *ids, size_t size);
367 static bool numa_topology_changed();
368 static int numa_get_group_id();
|