#else
  size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
#endif // _LP64
  return s;
}

size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
  // Only enable glibc guard pages for non-Java threads
  // (Java threads have HotSpot guard pages)
  return (thr_type == java_thread ? 0 : page_size());
}

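// A sketch of how the value above is consumed (the call site,
// os::create_thread, is outside this excerpt): the guard size is
// handed to glibc while the new thread's attributes are set up, e.g.
//
//   pthread_attr_t attr;
//   pthread_attr_init(&attr);
//   pthread_attr_setguardsize(&attr, os::Linux::default_guard_size(thr_type));
//
// so Java threads, for which 0 is returned above, get no glibc guard
// region at all.

// Find the extent of the current thread's stack, excluding the glibc
// guard region: on return, *bottom is the lowest usable address and
// *size is the number of usable bytes above it.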
static void current_stack_region(address *bottom, size_t *size) {
  pthread_attr_t attr;
  int res = pthread_getattr_np(pthread_self(), &attr);
  if (res != 0) {
    if (res == ENOMEM) {
      vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
    }
    else {
      fatal("pthread_getattr_np failed with error = %d", res);
    }
  }
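  // Note: pthread_getattr_np(), like the other pthread_* calls below,
  // reports failure through its return value rather than through
  // errno, so res is the error number printed above.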

  address stack_bottom;
  size_t stack_bytes;
  res = pthread_attr_getstack(&attr, (void **) &stack_bottom, &stack_bytes);
  if (res != 0) {
    fatal("pthread_attr_getstack failed with error = %d", res);
  }
  address stack_top = stack_bottom + stack_bytes;

  // The block of memory returned by pthread_attr_getstack() includes
  // guard pages where present. We need to trim these off.
  size_t page_bytes = os::Linux::page_size();
  assert(((intptr_t) stack_bottom & (page_bytes - 1)) == 0, "unaligned stack");

  size_t guard_bytes;
  res = pthread_attr_getguardsize(&attr, &guard_bytes);
  if (res != 0) {
    fatal("pthread_attr_getguardsize failed with error = %d", res);
  }
  int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes;
  assert(guard_bytes == guard_pages * page_bytes, "unaligned guard");
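  // For example, with 4K pages and glibc's default guard of one page,
  // guard_bytes is 4096 and guard_pages is 1.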

#ifdef IA64
  // IA64 has two stacks sharing the same area of memory, a normal
  // stack growing downwards and a register stack growing upwards.
  // Guard pages, if present, are in the centre. This code splits
  // the stack in two even without guard pages, though in theory
  // there's nothing to stop us allocating more to the normal stack
  // or more to the register stack if one or the other were found
  // to grow faster.
  int total_pages = align_size_down(stack_bytes, page_bytes) / page_bytes;
  stack_bottom += (total_pages - guard_pages) / 2 * page_bytes;
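  // For example, a 1M stack with 4K pages and one guard page gives
  // total_pages = 256 and guard_pages = 1, so stack_bottom moves up
  // (256 - 1) / 2 = 127 pages; once the guard is stepped over below,
  // the normal stack keeps the upper half of the region and the
  // register stack the lower half.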
#endif // IA64

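  // At this point any guard region starts at stack_bottom: glibc puts
  // it at the low end of a downward-growing stack, and on IA64 the
  // adjustment above has already left stack_bottom at the foot of the
  // centre guard. Stepping over it leaves only usable stack:
  //
  //   stack_top     ->  +------------------+
  //                     |   usable stack   |
  //   new bottom    ->  +------------------+
  //                     |   guard pages    |
  //   stack_bottom  ->  +------------------+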
  stack_bottom += guard_bytes;

  pthread_attr_destroy(&attr);