245 // Bind processes to processors.
246 // This is a two step procedure:
247 // first you generate a distribution of processes to processors,
248 // then you bind processes according to that distribution.
249 // Compute a distribution for number of processes to processors.
250 // Stores the processor IDs into the distribution array argument.
251 // Returns true if it worked, false if it didn't.
252 static bool distribute_processes(uint length, uint* distribution);
253 // Binds the current process to a processor.
254 // Returns true if it worked, false if it didn't.
255 static bool bind_to_processor(uint processor_id);
256
257 // Give a name to the current thread.
258 static void set_native_thread_name(const char *name);
259
260 // Interface for stack banging (predetect possible stack overflow for
261 // exception processing). There are guard pages, and above that shadow
262 // pages for stack overflow checking.
263 static bool uses_stack_guard_pages();
264 static bool allocate_stack_guard_pages();
265 static void bang_stack_shadow_pages();
266 static bool stack_shadow_pages_available(Thread *thread, const methodHandle& method);
267
268 // OS interface to Virtual Memory
269
270 // Return the default page size.
271 static int vm_page_size();
272
273 // Returns the page size to use for a region of memory.
274 // region_size / min_pages will always be greater than or equal to the
275 // returned value. The returned value will divide region_size.
276 static size_t page_size_for_region_aligned(size_t region_size, size_t min_pages);
277
278 // Returns the page size to use for a region of memory.
279 // region_size / min_pages will always be greater than or equal to the
280 // returned value. The returned value might not divide region_size.
281 static size_t page_size_for_region_unaligned(size_t region_size, size_t min_pages);
282
283 // Return the largest page size that can be used
284 static size_t max_page_size() {
285 // The _page_sizes array is sorted in descending order.
|
245 // Bind processes to processors.
246 // This is a two step procedure:
247 // first you generate a distribution of processes to processors,
248 // then you bind processes according to that distribution.
249 // Compute a distribution for number of processes to processors.
250 // Stores the processor IDs into the distribution array argument.
251 // Returns true if it worked, false if it didn't.
252 static bool distribute_processes(uint length, uint* distribution);
253 // Binds the current process to a processor.
254 // Returns true if it worked, false if it didn't.
255 static bool bind_to_processor(uint processor_id);
256
257 // Give a name to the current thread.
258 static void set_native_thread_name(const char *name);
259
260 // Interface for stack banging (predetect possible stack overflow for
261 // exception processing). There are guard pages, and above that shadow
262 // pages for stack overflow checking.
263 static bool uses_stack_guard_pages();
264 static bool allocate_stack_guard_pages();
265 static void map_stack_shadow_pages();
266 static bool stack_shadow_pages_available(Thread *thread, const methodHandle& method);
267
268 // OS interface to Virtual Memory
269
270 // Return the default page size.
271 static int vm_page_size();
272
273 // Returns the page size to use for a region of memory.
274 // region_size / min_pages will always be greater than or equal to the
275 // returned value. The returned value will divide region_size.
276 static size_t page_size_for_region_aligned(size_t region_size, size_t min_pages);
277
278 // Returns the page size to use for a region of memory.
279 // region_size / min_pages will always be greater than or equal to the
280 // returned value. The returned value might not divide region_size.
281 static size_t page_size_for_region_unaligned(size_t region_size, size_t min_pages);
282
283 // Return the largest page size that can be used
284 static size_t max_page_size() {
285 // The _page_sizes array is sorted in descending order.
|