src/os/bsd/vm/os_bsd.cpp
Index
Unified diffs
Context diffs
Sdiffs
Patch
New
Old
Previous File
Next File
*** old/src/os/linux/vm/os_linux.cpp Tue Sep 13 12:29:50 2011
--- new/src/os/bsd/vm/os_bsd.cpp Tue Sep 13 12:29:50 2011
*** 20,45 ****
--- 20,43 ----
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
# define __STDC_FORMAT_MACROS
// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
! #include "jvm_linux.h"
! #include "jvm_bsd.h"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
! #include "mutex_linux.inline.hpp"
! #include "mutex_bsd.inline.hpp"
#include "oops/oop.inline.hpp"
! #include "os_share_linux.hpp"
! #include "os_share_bsd.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
*** 56,66 ****
--- 54,64 ----
#include "runtime/stubRoutines.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
! #include "thread_linux.inline.hpp"
! #include "thread_bsd.inline.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
*** 108,156 ****
--- 106,181 ----
# include <sys/time.h>
# include <sys/times.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <sys/wait.h>
+ # include <time.h>
# include <pwd.h>
# include <poll.h>
# include <semaphore.h>
# include <fcntl.h>
# include <string.h>
+ #ifdef _ALLBSD_SOURCE
+ # include <sys/param.h>
+ # include <sys/sysctl.h>
+ #else
# include <syscall.h>
# include <sys/sysinfo.h>
# include <gnu/libc-version.h>
+ #endif
# include <sys/ipc.h>
# include <sys/shm.h>
+ #ifndef __APPLE__
# include <link.h>
+ #endif
# include <stdint.h>
# include <inttypes.h>
# include <sys/ioctl.h>
+ #if defined(__FreeBSD__) || defined(__NetBSD__)
+ # include <elf.h>
+ #endif
+
+ #ifdef __APPLE__
+ #include <mach/mach.h> // semaphore_* API
+ #include <mach-o/dyld.h>
+ #endif
+
+ #ifndef MAP_ANONYMOUS
+ #define MAP_ANONYMOUS MAP_ANON
+ #endif
+
#define MAX_PATH (2 * K)
// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
#define SEC_IN_NANOSECS 1000000000LL
#define LARGEPAGES_BIT (1 << 6)
////////////////////////////////////////////////////////////////////////////////
// global variables
! julong os::Linux::_physical_memory = 0;
! julong os::Bsd::_physical_memory = 0;
address os::Linux::_initial_thread_stack_bottom = NULL;
! uintptr_t os::Linux::_initial_thread_stack_size = 0;
+ #ifndef _ALLBSD_SOURCE
! address os::Bsd::_initial_thread_stack_bottom = NULL;
+ uintptr_t os::Bsd::_initial_thread_stack_size = 0;
+ #endif
! int (*os::Linux::_clock_gettime)(clockid_t, struct timespec *) = NULL;
int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
! Mutex* os::Linux::_createThread_lock = NULL;
! pthread_t os::Linux::_main_thread;
int os::Linux::_page_size = -1;
! bool os::Linux::_is_floating_stack = false;
! bool os::Linux::_is_NPTL = false;
bool os::Linux::_supports_fast_thread_cpu_time = false;
! const char * os::Linux::_glibc_version = NULL;
! const char * os::Linux::_libpthread_version = NULL;
! int (*os::Bsd::_clock_gettime)(clockid_t, struct timespec *) = NULL;
+ #ifndef _ALLBSD_SOURCE
! int (*os::Bsd::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
! Mutex* os::Bsd::_createThread_lock = NULL;
+ #endif
! pthread_t os::Bsd::_main_thread;
! int os::Bsd::_page_size = -1;
+ #ifndef _ALLBSD_SOURCE
! bool os::Bsd::_is_floating_stack = false;
! bool os::Bsd::_is_NPTL = false;
+ bool os::Bsd::_supports_fast_thread_cpu_time = false;
+ const char * os::Bsd::_glibc_version = NULL;
+ const char * os::Bsd::_libpthread_version = NULL;
+ #endif
static jlong initial_time_count=0;
static int clock_tics_per_sec = 100;
*** 164,224 ****
--- 189,224 ----
/* do not use any signal number less than SIGSEGV, see 4355769 */
static int SR_signum = SIGUSR2;
sigset_t SR_sigset;
/* Used to protect dlsym() calls */
static pthread_mutex_t dl_mutex;
#ifdef JAVASE_EMBEDDED
class MemNotifyThread: public Thread {
friend class VMStructs;
public:
virtual void run();
private:
static MemNotifyThread* _memnotify_thread;
int _fd;
public:
// Constructor
MemNotifyThread(int fd);
// Tester
bool is_memnotify_thread() const { return true; }
// Printing
char* name() const { return (char*)"Linux MemNotify Thread"; }
// Returns the single instance of the MemNotifyThread
static MemNotifyThread* memnotify_thread() { return _memnotify_thread; }
// Create and start the single instance of MemNotifyThread
static void start();
};
#endif // JAVASE_EMBEDDED
+ ////////////////////////////////////////////////////////////////////////////////
// utility functions
static int SR_initialize();
static int SR_finalize();
julong os::available_memory() {
! return Linux::available_memory();
! return Bsd::available_memory();
}
! julong os::Linux::available_memory() {
! julong os::Bsd::available_memory() {
+ #ifdef _ALLBSD_SOURCE
+ // XXXBSD: this is just a stopgap implementation
+ return physical_memory() >> 2;
+ #else
// values in struct sysinfo are "unsigned long"
struct sysinfo si;
sysinfo(&si);
return (julong)si.freeram * si.mem_unit;
+ #endif
}
julong os::physical_memory() {
! return Linux::physical_memory();
! return Bsd::physical_memory();
}
julong os::allocatable_physical_memory(julong size) {
#ifdef _LP64
return size;
*** 258,267 ****
--- 258,268 ----
}
return privileges;
}
+ #ifndef _ALLBSD_SOURCE
#ifndef SYS_gettid
// i386: 224, ia64: 1105, amd64: 186, sparc 143
#ifdef __ia64__
#define SYS_gettid 1105
#elif __i386__
*** 272,281 ****
--- 273,283 ----
#define SYS_gettid 143
#else
#error define gettid for the arch
#endif
#endif
+ #endif
// Cpu architecture string
#if defined(ZERO)
static char cpu_arch[] = ZERO_LIBARCH;
#elif defined(IA64)
*** 297,337 ****
--- 299,381 ----
#else
#error Add appropriate cpu_arch setting
#endif
+ #ifndef _ALLBSD_SOURCE
// pid_t gettid()
//
// Returns the kernel thread id of the currently running thread. Kernel
// thread id is used to access /proc.
//
! // (Note that getpid() on LinuxThreads returns kernel thread id too; but
! // (Note that getpid() on BsdThreads returns kernel thread id too; but
// on NPTL, it returns the same pid for all threads, as required by POSIX.)
//
! pid_t os::Linux::gettid() {
! pid_t os::Bsd::gettid() {
int rslt = syscall(SYS_gettid);
if (rslt == -1) {
// old kernel, no NPTL support
return getpid();
} else {
return (pid_t)rslt;
}
}
! // Most versions of linux have a bug where the number of processors are
! // Most versions of bsd have a bug where the number of processors are
// determined by looking at the /proc file system. In a chroot environment,
// the system call returns 1. This causes the VM to act as if it is
// a single processor and elide locking (see is_MP() call).
static bool unsafe_chroot_detected = false;
static const char *unstable_chroot_error = "/proc file system not found.\n"
"Java may be unstable running multithreaded in a chroot "
! "environment on Linux when /proc filesystem is not mounted.";
! "environment on Bsd when /proc filesystem is not mounted.";
+ #endif
void os::Linux::initialize_system_info() {
+ #ifdef _ALLBSD_SOURCE
+ void os::Bsd::initialize_system_info() {
+ int mib[2];
+ size_t len;
+ int cpu_val;
+ u_long mem_val;
+
+ /* get processors count via hw.ncpus sysctl */
+ mib[0] = CTL_HW;
+ mib[1] = HW_NCPU;
+ len = sizeof(cpu_val);
+ if (sysctl(mib, 2, &cpu_val, &len, NULL, 0) != -1 && cpu_val >= 1) {
+ set_processor_count(cpu_val);
+ }
+ else {
+ set_processor_count(1); // fallback
+ }
+
+ /* get physical memory via hw.usermem sysctl (hw.usermem is used
+ * instead of hw.physmem because we need size of allocatable memory
+ */
+ mib[0] = CTL_HW;
+ mib[1] = HW_USERMEM;
+ len = sizeof(mem_val);
+ if (sysctl(mib, 2, &mem_val, &len, NULL, 0) != -1)
+ _physical_memory = mem_val;
+ else
+ _physical_memory = 256*1024*1024; // fallback (XXXBSD?)
+
+ #ifdef __OpenBSD__
+ {
+ // limit _physical_memory memory view on OpenBSD since
+ // datasize rlimit restricts us anyway.
+ struct rlimit limits;
+ getrlimit(RLIMIT_DATA, &limits);
+ _physical_memory = MIN2(_physical_memory, (julong)limits.rlim_cur);
+ }
+ #endif
+ }
+ #else
+ void os::Bsd::initialize_system_info() {
set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
if (processor_count() == 1) {
! pid_t pid = os::Linux::gettid();
! pid_t pid = os::Bsd::gettid();
char fname[32];
jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);
FILE *fp = fopen(fname, "r");
if (fp == NULL) {
unsafe_chroot_detected = true;
*** 338,349 ****
--- 382,394 ----
} else {
fclose(fp);
}
}
_physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
! assert(processor_count() > 0, "linux error");
! assert(processor_count() > 0, "bsd error");
}
+ #endif
void os::init_system_properties_values() {
// char arch[12];
// sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));
*** 383,395 ****
--- 428,438 ----
* shared libraries:
* 1: ...
* ...
* 7: The default directories, normally /lib and /usr/lib.
*/
! #if defined(AMD64) || defined(_LP64) && (defined(SPARC) || defined(PPC) || defined(S390))
#define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"
#else
! #ifndef DEFAULT_LIBPATH
#define DEFAULT_LIBPATH "/lib:/usr/lib"
#endif
#define EXTENSIONS_DIR "/lib/ext"
#define ENDORSED_DIR "/lib/endorsed"
*** 439,449 ****
--- 482,492 ----
/*
* Where to look for native libraries
*
* Note: Due to a legacy implementation, most of the library path
* is set in the launcher. This was to accomodate linking restrictions
! * on legacy Linux implementations (which are no longer supported).
! * on legacy Bsd implementations (which are no longer supported).
* Eventually, all the library path setting will be done here.
*
* However, to prevent the proliferation of improperly built native
* libraries, the new path component /usr/java/packages is added here.
* Eventually, all the library path setting will be done here.
*** 464,474 ****
--- 507,521 ----
/*
* Get the user setting of LD_LIBRARY_PATH, and prepended it. It
* should always exist (until the legacy problem cited above is
* addressed).
*/
+ #ifdef __APPLE__
+ char *v = getenv("DYLD_LIBRARY_PATH");
+ #else
char *v = getenv("LD_LIBRARY_PATH");
+ #endif
if (v != NULL) {
char *t = ld_library_path;
/* That's +1 for the colon and +1 for the trailing '\0' */
ld_library_path = (char *) malloc(strlen(v) + 1 + strlen(t) + 1);
sprintf(ld_library_path, "%s:%s", v, t);
*** 524,534 ****
--- 571,581 ----
// signal support
debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
! bool os::Linux::is_sig_ignored(int sig) {
! bool os::Bsd::is_sig_ignored(int sig) {
struct sigaction oact;
sigaction(sig, (struct sigaction*)NULL, &oact);
void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
: CAST_FROM_FN_PTR(void*, oact.sa_handler);
if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
*** 535,545 ****
--- 582,592 ----
return true;
else
return false;
}
! void os::Linux::signal_sets_init() {
! void os::Bsd::signal_sets_init() {
// Should also have an assertion stating we are still single-threaded.
assert(!signal_sets_initialized, "Already initialized");
// Fill in signals that are necessarily unblocked for all threads in
// the VM. Currently, we unblock the following signals:
// SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
*** 560,578 ****
--- 607,625 ----
sigaddset(&unblocked_sigs, SIGBUS);
sigaddset(&unblocked_sigs, SIGFPE);
sigaddset(&unblocked_sigs, SR_signum);
if (!ReduceSignalUsage) {
! if (!os::Linux::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
! if (!os::Bsd::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
}
! if (!os::Linux::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
! if (!os::Bsd::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
}
! if (!os::Linux::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
! if (!os::Bsd::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
}
}
// Fill in signals that are blocked by all but the VM thread.
*** 583,620 ****
--- 630,667 ----
}
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
! sigset_t* os::Linux::unblocked_signals() {
! sigset_t* os::Bsd::unblocked_signals() {
assert(signal_sets_initialized, "Not initialized");
return &unblocked_sigs;
}
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
! sigset_t* os::Linux::vm_signals() {
! sigset_t* os::Bsd::vm_signals() {
assert(signal_sets_initialized, "Not initialized");
return &vm_sigs;
}
// These are signals that are blocked during cond_wait to allow debugger in
! sigset_t* os::Linux::allowdebug_blocked_signals() {
! sigset_t* os::Bsd::allowdebug_blocked_signals() {
assert(signal_sets_initialized, "Not initialized");
return &allowdebug_blocked_sigs;
}
! void os::Linux::hotspot_sigmask(Thread* thread) {
! void os::Bsd::hotspot_sigmask(Thread* thread) {
//Save caller's signal mask before setting VM signal mask
sigset_t caller_sigmask;
pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);
OSThread* osthread = thread->osthread();
osthread->set_caller_sigmask(caller_sigmask);
! pthread_sigmask(SIG_UNBLOCK, os::Linux::unblocked_signals(), NULL);
! pthread_sigmask(SIG_UNBLOCK, os::Bsd::unblocked_signals(), NULL);
if (!ReduceSignalUsage) {
if (thread->is_VM_thread()) {
// Only the VM thread handles BREAK_SIGNAL ...
pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
*** 623,636 ****
--- 670,684 ----
pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
}
}
}
+ #ifndef _ALLBSD_SOURCE
//////////////////////////////////////////////////////////////////////////////
// detecting pthread library
! void os::Linux::libpthread_init() {
! void os::Bsd::libpthread_init() {
// Save glibc and pthread version strings. Note that _CS_GNU_LIBC_VERSION
// and _CS_GNU_LIBPTHREAD_VERSION are supported in glibc >= 2.3.2. Use a
// generic name for earlier versions.
// Define macros here so we can build HotSpot on old systems.
# ifndef _CS_GNU_LIBC_VERSION
*** 642,727 ****
--- 690,775 ----
size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
if (n > 0) {
char *str = (char *)malloc(n);
confstr(_CS_GNU_LIBC_VERSION, str, n);
! os::Linux::set_glibc_version(str);
! os::Bsd::set_glibc_version(str);
} else {
// _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
static char _gnu_libc_version[32];
jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version),
"glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release());
! os::Linux::set_glibc_version(_gnu_libc_version);
! os::Bsd::set_glibc_version(_gnu_libc_version);
}
n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
if (n > 0) {
char *str = (char *)malloc(n);
confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
// Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
! // us "NPTL-0.29" even we are running with LinuxThreads. Check if this
! // is the case. LinuxThreads has a hard limit on max number of threads.
! // us "NPTL-0.29" even we are running with BsdThreads. Check if this
! // is the case. BsdThreads has a hard limit on max number of threads.
// So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
// On the other hand, NPTL does not have such a limit, sysconf()
// will return -1 and errno is not changed. Check if it is really NPTL.
! if (strcmp(os::Linux::glibc_version(), "glibc 2.3.2") == 0 &&
! if (strcmp(os::Bsd::glibc_version(), "glibc 2.3.2") == 0 &&
strstr(str, "NPTL") &&
sysconf(_SC_THREAD_THREADS_MAX) > 0) {
free(str);
! os::Linux::set_libpthread_version("linuxthreads");
! os::Bsd::set_libpthread_version("bsdthreads");
} else {
! os::Linux::set_libpthread_version(str);
! os::Bsd::set_libpthread_version(str);
}
} else {
! // glibc before 2.3.2 only has LinuxThreads.
! os::Linux::set_libpthread_version("linuxthreads");
! // glibc before 2.3.2 only has BsdThreads.
! os::Bsd::set_libpthread_version("bsdthreads");
}
if (strstr(libpthread_version(), "NPTL")) {
! os::Linux::set_is_NPTL();
! os::Bsd::set_is_NPTL();
} else {
! os::Linux::set_is_LinuxThreads();
! os::Bsd::set_is_BsdThreads();
}
! // LinuxThreads have two flavors: floating-stack mode, which allows variable
! // BsdThreads have two flavors: floating-stack mode, which allows variable
// stack size; and fixed-stack mode. NPTL is always floating-stack.
! if (os::Linux::is_NPTL() || os::Linux::supports_variable_stack_size()) {
! os::Linux::set_is_floating_stack();
! if (os::Bsd::is_NPTL() || os::Bsd::supports_variable_stack_size()) {
! os::Bsd::set_is_floating_stack();
}
}
/////////////////////////////////////////////////////////////////////////////
// thread stack
! // Force Linux kernel to expand current thread stack. If "bottom" is close
! // Force Bsd kernel to expand current thread stack. If "bottom" is close
// to the stack guard, caller should block all signals.
//
// MAP_GROWSDOWN:
// A special mmap() flag that is used to implement thread stacks. It tells
// kernel that the memory region should extend downwards when needed. This
! // allows early versions of LinuxThreads to only mmap the first few pages
! // when creating a new thread. Linux kernel will automatically expand thread
! // allows early versions of BsdThreads to only mmap the first few pages
! // when creating a new thread. Bsd kernel will automatically expand thread
// stack as needed (on page faults).
//
// However, because the memory region of a MAP_GROWSDOWN stack can grow on
// demand, if a page fault happens outside an already mapped MAP_GROWSDOWN
// region, it's hard to tell if the fault is due to a legitimate stack
// access or because of reading/writing non-exist memory (e.g. buffer
// overrun). As a rule, if the fault happens below current stack pointer,
! // Linux kernel does not expand stack, instead a SIGSEGV is sent to the
! // application (see Linux kernel fault.c).
! // Bsd kernel does not expand stack, instead a SIGSEGV is sent to the
! // application (see Bsd kernel fault.c).
//
! // This Linux feature can cause SIGSEGV when VM bangs thread stack for
! // This Bsd feature can cause SIGSEGV when VM bangs thread stack for
// stack overflow detection.
//
! // Newer version of LinuxThreads (since glibc-2.2, or, RH-7.x) and NPTL do
! // Newer version of BsdThreads (since glibc-2.2, or, RH-7.x) and NPTL do
// not use this flag. However, the stack of initial thread is not created
// by pthread, it is still MAP_GROWSDOWN. Also it's possible (though
// unlikely) that user code can create a thread with MAP_GROWSDOWN stack
// and then attach the thread to JVM.
//
! // To get around the problem and allow stack banging on Linux, we need to
! // To get around the problem and allow stack banging on Bsd, we need to
// manually expand thread stack after receiving the SIGSEGV.
//
// There are two ways to expand thread stack to address "bottom", we used
// both of them in JVM before 1.5:
// 1. adjust stack pointer first so that it is below "bottom", and then
*** 733,743 ****
--- 781,791 ----
// call mmap() to map page 100, it is possible that part of the mmap() frame
// will be placed in page 100. When page 100 is mapped, it is zero-filled.
// That will destroy the mmap() frame and cause VM to crash.
//
// The following code works by adjusting sp first, then accessing the "bottom"
! // page to force a page fault. Linux kernel will then automatically expand the
! // page to force a page fault. Bsd kernel will then automatically expand the
// stack mapping.
//
// _expand_stack_to() assumes its frame size is less than page size, which
// should always be true if the function is not inlined.
*** 754,765 ****
--- 802,813 ----
size_t size;
volatile char *p;
// Adjust bottom to point to the largest address within the same page, it
// gives us a one-page buffer if alloca() allocates slightly more memory.
! bottom = (address)align_size_down((uintptr_t)bottom, os::Linux::page_size());
! bottom += os::Linux::page_size() - 1;
! bottom = (address)align_size_down((uintptr_t)bottom, os::Bsd::page_size());
! bottom += os::Bsd::page_size() - 1;
// sp might be slightly above current stack pointer; if that's the case, we
// will alloca() a little more space than necessary, which is OK. Don't use
// os::current_stack_pointer(), as its result can be slightly below current
// stack pointer, causing us to not alloca enough to reach "bottom".
*** 771,781 ****
--- 819,829 ----
assert(p != NULL && p <= (volatile char *)bottom, "alloca problem?");
p[0] = '\0';
}
}
! bool os::Linux::manually_expand_stack(JavaThread * t, address addr) {
! bool os::Bsd::manually_expand_stack(JavaThread * t, address addr) {
assert(t!=NULL, "just checking");
assert(t->osthread()->expanding_stack(), "expand should be set");
assert(t->stack_base() != NULL, "stack_base was not initialized");
if (addr < t->stack_base() && addr >= t->stack_yellow_zone_base()) {
*** 786,809 ****
--- 834,861 ----
pthread_sigmask(SIG_SETMASK, &old_sigset, NULL);
return true;
}
return false;
}
+ #endif
//////////////////////////////////////////////////////////////////////////////
// create new thread
static address highest_vm_reserved_address();
// check if it's safe to start a new thread
static bool _thread_safety_check(Thread* thread) {
if (os::Linux::is_LinuxThreads() && !os::Linux::is_floating_stack()) {
// Fixed stack LinuxThreads (SuSE Linux/x86, and some versions of Redhat)
+ #ifdef _ALLBSD_SOURCE
+ return true;
+ #else
+ if (os::Bsd::is_BsdThreads() && !os::Bsd::is_floating_stack()) {
+ // Fixed stack BsdThreads (SuSE Bsd/x86, and some versions of Redhat)
// Heap is mmap'ed at lower end of memory space. Thread stacks are
// allocated (MAP_FIXED) from high address space. Every thread stack
// occupies a fixed size slot (usually 2Mbytes, but user can change
! // it to other values if they rebuild LinuxThreads).
! // it to other values if they rebuild BsdThreads).
//
// Problem with MAP_FIXED is that mmap() can still succeed even part of
// the memory region has already been mmap'ed. That means if we have too
// many threads and/or very large heap, eventually thread stack will
// collide with heap.
*** 821,836 ****
--- 873,889 ----
return stack_bottom - ThreadSafetyMargin >= highest_vm_reserved_address();
} else {
return true;
}
} else {
! // Floating stack LinuxThreads or NPTL:
! // Unlike fixed stack LinuxThreads, thread stacks are not MAP_FIXED. When
! // Floating stack BsdThreads or NPTL:
! // Unlike fixed stack BsdThreads, thread stacks are not MAP_FIXED. When
// there's not enough space left, pthread_create() will fail. If we come
// here, that means enough space has been reserved for stack.
return true;
}
+ #endif
}
// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {
// Try to randomize the cache line index of hot stack frames.
*** 845,877 ****
--- 898,935 ----
ThreadLocalStorage::set_thread(thread);
OSThread* osthread = thread->osthread();
Monitor* sync = osthread->startThread_lock();
! // non floating stack LinuxThreads needs extra check, see above
! // non floating stack BsdThreads needs extra check, see above
if (!_thread_safety_check(thread)) {
// notify parent thread
MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
osthread->set_state(ZOMBIE);
sync->notify_all();
return NULL;
}
+ #ifdef _ALLBSD_SOURCE
+ // thread_id is pthread_id on BSD
+ osthread->set_thread_id(::pthread_self());
+ #else
// thread_id is kernel thread id (similar to Solaris LWP id)
! osthread->set_thread_id(os::Linux::gettid());
! osthread->set_thread_id(os::Bsd::gettid());
if (UseNUMA) {
int lgrp_id = os::numa_get_group_id();
if (lgrp_id != -1) {
thread->set_lgrp_id(lgrp_id);
}
}
+ #endif
// initialize signal mask for this thread
! os::Linux::hotspot_sigmask(thread);
! os::Bsd::hotspot_sigmask(thread);
// initialize floating point control register
! os::Linux::init_thread_fpu_state();
! os::Bsd::init_thread_fpu_state();
// handshaking with parent thread
{
MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
*** 912,925 ****
--- 970,983 ----
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
// stack size
! if (os::Linux::supports_variable_stack_size()) {
! if (os::Bsd::supports_variable_stack_size()) {
// calculate stack size if it's not specified by caller
if (stack_size == 0) {
! stack_size = os::Linux::default_stack_size(thr_type);
! stack_size = os::Bsd::default_stack_size(thr_type);
switch (thr_type) {
case os::java_thread:
// Java threads use ThreadStackSize which default value can be
// changed with the flag -Xss
*** 939,965 ****
--- 997,1028 ----
if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
break;
}
}
! stack_size = MAX2(stack_size, os::Linux::min_stack_allowed);
! stack_size = MAX2(stack_size, os::Bsd::min_stack_allowed);
pthread_attr_setstacksize(&attr, stack_size);
} else {
// let pthread_create() pick the default value.
}
+ #ifndef _ALLBSD_SOURCE
// glibc guard page
! pthread_attr_setguardsize(&attr, os::Linux::default_guard_size(thr_type));
! pthread_attr_setguardsize(&attr, os::Bsd::default_guard_size(thr_type));
+ #endif
ThreadState state;
{
// Serialize thread creation if we are running with fixed stack LinuxThreads
bool lock = os::Linux::is_LinuxThreads() && !os::Linux::is_floating_stack();
+
+ #ifndef _ALLBSD_SOURCE
+ // Serialize thread creation if we are running with fixed stack BsdThreads
+ bool lock = os::Bsd::is_BsdThreads() && !os::Bsd::is_floating_stack();
if (lock) {
! os::Linux::createThread_lock()->lock_without_safepoint_check();
! os::Bsd::createThread_lock()->lock_without_safepoint_check();
}
+ #endif
pthread_t tid;
int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
pthread_attr_destroy(&attr);
*** 969,979 ****
--- 1032,1044 ----
perror("pthread_create()");
}
// Need to clean up stuff we've allocated so far
thread->set_osthread(NULL);
delete osthread;
if (lock) os::Linux::createThread_lock()->unlock();
+ #ifndef _ALLBSD_SOURCE
+ if (lock) os::Bsd::createThread_lock()->unlock();
+ #endif
return false;
}
// Store pthread info into the OSThread
osthread->set_pthread_id(tid);
*** 985,997 ****
--- 1050,1064 ----
while ((state = osthread->get_state()) == ALLOCATED) {
sync_with_child->wait(Mutex::_no_safepoint_check_flag);
}
}
+ #ifndef _ALLBSD_SOURCE
if (lock) {
! os::Linux::createThread_lock()->unlock();
! os::Bsd::createThread_lock()->unlock();
}
+ #endif
}
// Aborted due to thread limit being reached
if (state == ZOMBIE) {
thread->set_osthread(NULL);
*** 1008,1018 ****
--- 1075,1085 ----
/////////////////////////////////////////////////////////////////////////////
// attach existing thread
// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
! assert(os::Linux::_main_thread == pthread_self(), "should be called inside main thread");
! assert(os::Bsd::_main_thread == pthread_self(), "should be called inside main thread");
return create_attached_thread(thread);
}
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
*** 1025,1075 ****
--- 1092,1148 ----
if (osthread == NULL) {
return false;
}
// Store pthread info into the OSThread
osthread->set_thread_id(os::Linux::gettid());
+ #ifdef _ALLBSD_SOURCE
+ osthread->set_thread_id(::pthread_self());
+ #else
+ osthread->set_thread_id(os::Bsd::gettid());
+ #endif
osthread->set_pthread_id(::pthread_self());
// initialize floating point control register
! os::Linux::init_thread_fpu_state();
! os::Bsd::init_thread_fpu_state();
// Initial thread state is RUNNABLE
osthread->set_state(RUNNABLE);
thread->set_osthread(osthread);
+ #ifndef _ALLBSD_SOURCE
if (UseNUMA) {
int lgrp_id = os::numa_get_group_id();
if (lgrp_id != -1) {
thread->set_lgrp_id(lgrp_id);
}
}
! if (os::Linux::is_initial_thread()) {
! if (os::Bsd::is_initial_thread()) {
// If current thread is initial thread, its stack is mapped on demand,
// see notes about MAP_GROWSDOWN. Here we try to force kernel to map
// the entire stack region to avoid SEGV in stack banging.
// It is also useful to get around the heap-stack-gap problem on SuSE
// kernel (see 4821821 for details). We first expand stack to the top
// of yellow zone, then enable stack yellow zone (order is significant,
! // enabling yellow zone first will crash JVM on SuSE Linux), so there
! // enabling yellow zone first will crash JVM on SuSE Bsd), so there
// is no gap between the last two virtual memory regions.
JavaThread *jt = (JavaThread *)thread;
address addr = jt->stack_yellow_zone_base();
assert(addr != NULL, "initialization problem?");
assert(jt->stack_available(addr) > 0, "stack guard should not be enabled");
osthread->set_expanding_stack();
! os::Linux::manually_expand_stack(jt, addr);
! os::Bsd::manually_expand_stack(jt, addr);
osthread->clear_expanding_stack();
}
+ #endif
// initialize signal mask for this thread
// and save the caller's signal mask
! os::Linux::hotspot_sigmask(thread);
! os::Bsd::hotspot_sigmask(thread);
return true;
}
void os::pd_start_thread(Thread* thread) {
*** 1078,1088 ****
--- 1151,1161 ----
Monitor* sync_with_child = osthread->startThread_lock();
MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag);
sync_with_child->notify();
}
! // Free Linux resources related to the OSThread
! // Free Bsd resources related to the OSThread
void os::free_thread(OSThread* osthread) {
assert(osthread != NULL, "osthread not set");
if (Thread::current()->osthread() == osthread) {
// Restore caller's signal mask
*** 1120,1131 ****
--- 1193,1205 ----
}
//////////////////////////////////////////////////////////////////////////////
// initial thread
+ #ifndef _ALLBSD_SOURCE
// Check if current thread is the initial thread, similar to Solaris thr_main.
! bool os::Linux::is_initial_thread(void) {
! bool os::Bsd::is_initial_thread(void) {
char dummy;
// If called before init complete, thread stack bottom will be null.
// Can be called if fatal error occurs before initialization.
if (initial_thread_stack_bottom() == NULL) return false;
assert(initial_thread_stack_bottom() != NULL &&
*** 1160,1172 ****
--- 1234,1246 ----
}
return false;
}
// Locate initial thread stack. This special handling of initial thread stack
! // is needed because pthread_getattr_np() on most (all?) Linux distros returns
! // is needed because pthread_getattr_np() on most (all?) Bsd distros returns
// bogus value for initial thread.
! void os::Linux::capture_initial_stack(size_t max_size) {
! void os::Bsd::capture_initial_stack(size_t max_size) {
// stack size is the easy part, get it from RLIMIT_STACK
size_t stack_size;
struct rlimit rlim;
getrlimit(RLIMIT_STACK, &rlim);
stack_size = rlim.rlim_cur;
*** 1196,1206 ****
--- 1270,1280 ----
// a global variable "__libc_stack_end", which is then used by system
// libraries. __libc_stack_end should be pretty close to stack top. The
// variable is available since the very early days. However, because it is
// a private interface, it could disappear in the future.
//
! // Linux kernel saves start_stack information in /proc/<pid>/stat. Similar
! // Bsd kernel saves start_stack information in /proc/<pid>/stat. Similar
// to __libc_stack_end, it is very close to stack top, but isn't the real
// stack top. Note that /proc may not exist if VM is running as a chroot
// program, so reading /proc/<pid>/stat could fail. Also the contents of
// /proc/<pid>/stat could change in the future (though unlikely).
//
*** 1314,1324 ****
--- 1388,1398 ----
warning("Can't detect initial thread stack location - bad conversion");
stack_start = (uintptr_t) &rlim;
}
} else {
// For some reason we can't open /proc/self/stat (for example, running on
! // FreeBSD with a Linux emulator, or inside chroot), this should work for
! // FreeBSD with a Linux emulator, or inside chroot), this should work for
// most cases, so don't abort:
warning("Can't detect initial thread stack location - no /proc/self/stat");
stack_start = (uintptr_t) &rlim;
}
}
*** 1356,1365 ****
--- 1430,1440 ----
}
_initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size());
_initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;
}
+ #endif
////////////////////////////////////////////////////////////////////////////////
// time support
// Time since start-up in seconds to a fine granularity.
*** 1377,1389 ****
--- 1452,1462 ----
jlong os::elapsed_frequency() {
return (1000 * 1000);
}
// For now, we say that linux does not support vtime. I have no idea
// whether it can actually be made to (DLD, 9/13/05).
+ // XXX: For now, code this as if BSD does not support vtime.
bool os::supports_vtime() { return false; }
bool os::enable_vtime() { return false; }
bool os::vtime_enabled() { return false; }
double os::elapsedVTime() {
// better than nothing, but not much
*** 1391,1410 ****
--- 1464,1498 ----
}
jlong os::javaTimeMillis() {
timeval time;
int status = gettimeofday(&time, NULL);
! assert(status != -1, "linux error");
! assert(status != -1, "bsd error");
return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}
#ifndef CLOCK_MONOTONIC
#define CLOCK_MONOTONIC (1)
#endif
void os::Linux::clock_init() {
// we do dlopen's in this particular order due to bug in linux
+ #ifdef __APPLE__
+ void os::Bsd::clock_init() {
+ // XXXDARWIN: Investigate replacement monotonic clock
+ }
+ #elif defined(_ALLBSD_SOURCE)
+ void os::Bsd::clock_init() {
+ struct timespec res;
+ struct timespec tp;
+ if (::clock_getres(CLOCK_MONOTONIC, &res) == 0 &&
+ ::clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
+ // yes, monotonic clock is supported
+ _clock_gettime = ::clock_gettime;
+ }
+ }
+ #else
+ void os::Bsd::clock_init() {
+ // we do dlopen's in this particular order due to bug in Linux
// dynamical loader (see 6348968) leading to crash on exit
void* handle = dlopen("librt.so.1", RTLD_LAZY);
if (handle == NULL) {
handle = dlopen("librt.so", RTLD_LAZY);
}
*** 1434,1444 ****
--- 1522,1534 ----
dlclose(handle);
}
}
}
}
+ #endif
+ #ifndef _ALLBSD_SOURCE
#ifndef SYS_clock_getres
#if defined(IA32) || defined(AMD64)
#define SYS_clock_getres IA32_ONLY(266) AMD64_ONLY(229)
#define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y)
*** 1449,1460 ****
--- 1539,1550 ----
#else
#define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y)
#endif
! void os::Linux::fast_thread_clock_init() {
! if (!UseLinuxPosixThreadCPUClocks) {
! void os::Bsd::fast_thread_clock_init() {
! if (!UseBsdPosixThreadCPUClocks) {
return;
}
clockid_t clockid;
struct timespec tp;
int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) =
*** 1475,1503 ****
--- 1565,1594 ----
_supports_fast_thread_cpu_time = true;
_pthread_getcpuclockid = pthread_getcpuclockid_func;
}
}
+ #endif
jlong os::javaTimeNanos() {
! if (Linux::supports_monotonic_clock()) {
! if (Bsd::supports_monotonic_clock()) {
struct timespec tp;
! int status = Linux::clock_gettime(CLOCK_MONOTONIC, &tp);
! int status = Bsd::clock_gettime(CLOCK_MONOTONIC, &tp);
assert(status == 0, "gettime error");
jlong result = jlong(tp.tv_sec) * (1000 * 1000 * 1000) + jlong(tp.tv_nsec);
return result;
} else {
timeval time;
int status = gettimeofday(&time, NULL);
! assert(status != -1, "linux error");
! assert(status != -1, "bsd error");
jlong usecs = jlong(time.tv_sec) * (1000 * 1000) + jlong(time.tv_usec);
return 1000 * usecs;
}
}
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
! if (Linux::supports_monotonic_clock()) {
! if (Bsd::supports_monotonic_clock()) {
info_ptr->max_value = ALL_64_BITS;
// CLOCK_MONOTONIC - amount of time since some arbitrary point in the past
info_ptr->may_skip_backward = false; // not subject to resetting or drifting
info_ptr->may_skip_forward = false; // not subject to resetting or drifting
*** 1594,1608 ****
--- 1685,1699 ----
::exit(1);
}
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
! // _exit() on LinuxThreads only kills current thread
! // _exit() on LinuxThreads only kills current thread
::abort();
}
! // unused on linux for now.
! // unused on bsd for now.
void os::set_error_file(const char *logfile) {}
// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c
*** 1622,1635 ****
--- 1713,1726 ----
}
intx os::current_thread_id() { return (intx)pthread_self(); }
int os::current_process_id() {
! // Under the old linux thread library, linux gives each thread
! // Under the old Linux thread library (LinuxThreads), Linux gives each thread
// its own process id. Because of this each thread will return
// a different pid if this method were to return the result
! // of getpid(2). Linux provides no api that returns the pid
! // of getpid(2). Linux provides no api that returns the pid
// of the launcher thread for the vm. This implementation
// returns a unique pid, the pid of the launcher thread
// that starts the vm 'process'.
// Under the NPTL, getpid() returns the same pid as the
*** 1636,1654 ****
--- 1727,1752 ----
// launcher thread rather than a unique pid per thread.
// Use gettid() if you want the old pre NPTL behaviour.
// if you are looking for the result of a call to getpid() that
// returns a unique pid for the calling thread, then look at the
! // OSThread::thread_id() method in osThread_linux.hpp file
! // OSThread::thread_id() method in osThread_bsd.hpp file
return (int)(_initial_pid ? _initial_pid : getpid());
}
// DLL functions
const char* os::dll_file_extension() { return ".so"; }
+ #define JNI_LIB_PREFIX "lib"
+ #ifdef __APPLE__
+ #define JNI_LIB_SUFFIX ".dylib"
+ #else
+ #define JNI_LIB_SUFFIX ".so"
+ #endif
+ const char* os::dll_file_extension() { return JNI_LIB_SUFFIX; }
+
// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }
static bool file_exists(const char* filename) {
*** 1663,1688 ****
--- 1761,1787 ----
const char* pname, const char* fname) {
// Copied from libhpi
const size_t pnamelen = pname ? strlen(pname) : 0;
// Quietly truncate on buffer overflow. Should be an error.
! if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
! if (pnamelen + strlen(fname) + strlen(JNI_LIB_PREFIX) + strlen(JNI_LIB_SUFFIX) + 2 > buflen) {
*buffer = '\0';
return;
}
if (pnamelen == 0) {
! snprintf(buffer, buflen, "lib%s.so", fname);
! snprintf(buffer, buflen, JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX, fname);
} else if (strchr(pname, *os::path_separator()) != NULL) {
int n;
char** pelements = split_path(pname, &n);
for (int i = 0 ; i < n ; i++) {
// Really shouldn't be NULL, but check can't hurt
if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
continue; // skip the empty path values
}
! snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
! snprintf(buffer, buflen, "%s/" JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX,
+ pelements[i], fname);
if (file_exists(buffer)) {
break;
}
}
// release the storage
*** 1693,1703 ****
--- 1792,1802 ----
}
if (pelements != NULL) {
FREE_C_HEAP_ARRAY(char*, pelements);
}
} else {
! snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
! snprintf(buffer, buflen, "%s/" JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX, pname, fname);
}
}
const char* os::get_current_directory(char *buf, int buflen) {
return getcwd(buf, buflen);
*** 1743,1752 ****
--- 1842,1868 ----
if (buf != NULL) buf[0] = '\0';
if (offset != NULL) *offset = -1;
return false;
}
+ #ifdef _ALLBSD_SOURCE
+ // ported from solaris version
+ bool os::dll_address_to_library_name(address addr, char* buf,
+ int buflen, int* offset) {
+ Dl_info dlinfo;
+
+ if (dladdr((void*)addr, &dlinfo)){
+ if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
+ if (offset) *offset = addr - (address)dlinfo.dli_fbase;
+ return true;
+ } else {
+ if (buf) buf[0] = '\0';
+ if (offset) *offset = -1;
+ return false;
+ }
+ }
+ #else
struct _address_to_library_name {
address addr; // input : memory address
size_t buflen; // size of fname
char* fname; // output: library name
address base; // library base addr
*** 1817,1831 ****
--- 1933,1963 ----
if (buf) buf[0] = '\0';
if (offset) *offset = -1;
return false;
}
}
+ #endif
// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
+ #ifdef __APPLE__
+ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
+ void * result= ::dlopen(filename, RTLD_LAZY);
+ if (result != NULL) {
+ // Successful loading
+ return result;
+ }
+
+ // Read system error message into ebuf
+ ::strncpy(ebuf, ::dlerror(), ebuflen-1);
+ ebuf[ebuflen-1]='\0';
+
+ return NULL;
+ }
+ #else
void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
{
void * result= ::dlopen(filename, RTLD_LAZY);
if (result != NULL) {
// Successful loading
*** 1874,1883 ****
--- 2006,2035 ----
#ifndef EM_486
#define EM_486 6 /* Intel 80486 */
#endif
+ #ifndef EM_MIPS_RS3_LE
+ #define EM_MIPS_RS3_LE 10 /* MIPS */
+ #endif
+
+ #ifndef EM_PPC64
+ #define EM_PPC64 21 /* PowerPC64 */
+ #endif
+
+ #ifndef EM_S390
+ #define EM_S390 22 /* IBM System/390 */
+ #endif
+
+ #ifndef EM_IA_64
+ #define EM_IA_64 50 /* HP/Intel IA-64 */
+ #endif
+
+ #ifndef EM_X86_64
+ #define EM_X86_64 62 /* AMD x86-64 */
+ #endif
+
static const arch_t arch_array[]={
{EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
{EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
{EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
{EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
*** 1977,1997 ****
--- 2129,2143 ----
}
}
return NULL;
}
+ #endif /* !__APPLE__ */
/*
* glibc-2.0 libdl is not MT safe. If you are building with any glibc,
* chances are you might want to run the generated bits against glibc-2.0
* libdl.so, so always use locking for any version of glibc.
*/
+ // XXX: Do we need a lock around this as per Linux?
void* os::dll_lookup(void* handle, const char* name) {
! pthread_mutex_lock(&dl_mutex);
void* res = dlsym(handle, name);
pthread_mutex_unlock(&dl_mutex);
return res;
! return dlsym(handle, name);
}
static bool _print_ascii_file(const char* filename, outputStream* st) {
int fd = ::open(filename, O_RDONLY);
*** 2010,2049 ****
--- 2156,2240 ----
return true;
}
void os::print_dll_info(outputStream *st) {
st->print_cr("Dynamic libraries:");
+ #ifdef _ALLBSD_SOURCE
+ #ifdef RTLD_DI_LINKMAP
+ Dl_info dli;
+ void *handle;
+ Link_map *map;
+ Link_map *p;
+ if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) {
+ st->print_cr("Error: Cannot print dynamic libraries.");
+ return;
+ }
+ handle = dlopen(dli.dli_fname, RTLD_LAZY);
+ if (handle == NULL) {
+ st->print_cr("Error: Cannot print dynamic libraries.");
+ return;
+ }
+ dlinfo(handle, RTLD_DI_LINKMAP, &map);
+ if (map == NULL) {
+ st->print_cr("Error: Cannot print dynamic libraries.");
+ return;
+ }
+
+ while (map->l_prev != NULL)
+ map = map->l_prev;
+
+ while (map != NULL) {
+ st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
+ map = map->l_next;
+ }
+
+ dlclose(handle);
+ #elif defined(__APPLE__)
+ uint32_t count;
+ uint32_t i;
+
+ count = _dyld_image_count();
+ for (i = 1; i < count; i++) {
+ const char *name = _dyld_get_image_name(i);
+ intptr_t slide = _dyld_get_image_vmaddr_slide(i);
+ st->print_cr(PTR_FORMAT " \t%s", slide, name);
+ }
+ #else
+ st->print_cr("Error: Cannot print dynamic libraries.");
+ #endif
+ #else
char fname[32];
! pid_t pid = os::Linux::gettid();
! pid_t pid = os::Bsd::gettid();
jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid);
if (!_print_ascii_file(fname, st)) {
st->print("Can not get library information for pid = %d\n", pid);
}
+ #endif
}
void os::print_os_info(outputStream* st) {
st->print("OS:");
// Try to identify popular distros.
! // Most Linux distributions have /etc/XXX-release file, which contains
! // Most Linux distributions have /etc/XXX-release file, which contains
// the OS version string. Some have more than one /etc/XXX-release file
// (e.g. Mandrake has both /etc/mandrake-release and /etc/redhat-release.),
// so the order is important.
if (!_print_ascii_file("/etc/mandrake-release", st) &&
!_print_ascii_file("/etc/sun-release", st) &&
!_print_ascii_file("/etc/redhat-release", st) &&
!_print_ascii_file("/etc/SuSE-release", st) &&
! !_print_ascii_file("/etc/turbolinux-release", st) &&
! !_print_ascii_file("/etc/turbobsd-release", st) &&
!_print_ascii_file("/etc/gentoo-release", st) &&
!_print_ascii_file("/etc/debian_version", st) &&
!_print_ascii_file("/etc/ltib-release", st) &&
!_print_ascii_file("/etc/angstrom-version", st)) {
! st->print("Linux");
! st->print("Bsd");
}
st->cr();
// kernel
st->print("uname:");
*** 2053,2076 ****
--- 2244,2269 ----
st->print(name.release); st->print(" ");
st->print(name.version); st->print(" ");
st->print(name.machine);
st->cr();
+ #ifndef _ALLBSD_SOURCE
// Print warning if unsafe chroot environment detected
if (unsafe_chroot_detected) {
st->print("WARNING!! ");
st->print_cr(unstable_chroot_error);
}
// libc, pthread
st->print("libc:");
! st->print(os::Linux::glibc_version()); st->print(" ");
! st->print(os::Linux::libpthread_version()); st->print(" ");
! if (os::Linux::is_LinuxThreads()) {
! st->print("(%s stack)", os::Linux::is_floating_stack() ? "floating" : "fixed");
! st->print(os::Bsd::glibc_version()); st->print(" ");
! st->print(os::Bsd::libpthread_version()); st->print(" ");
! if (os::Bsd::is_BsdThreads()) {
! st->print("(%s stack)", os::Bsd::is_floating_stack() ? "floating" : "fixed");
}
st->cr();
+ #endif
// rlimit
st->print("rlimit:");
struct rlimit rlim;
*** 2092,2101 ****
--- 2285,2295 ----
st->print(", NOFILE ");
getrlimit(RLIMIT_NOFILE, &rlim);
if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
else st->print("%d", rlim.rlim_cur);
+ #ifndef _ALLBSD_SOURCE
st->print(", AS ");
getrlimit(RLIMIT_AS, &rlim);
if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
else st->print("%uk", rlim.rlim_cur >> 10);
st->cr();
*** 2104,2150 ****
--- 2298,2345 ----
st->print("load average:");
double loadavg[3];
os::loadavg(loadavg, 3);
st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
st->cr();
// meminfo
st->print("\n/proc/meminfo:\n");
_print_ascii_file("/proc/meminfo", st);
st->cr();
+ #endif
}
void os::pd_print_cpu_info(outputStream* st) {
st->print("\n/proc/cpuinfo:\n");
if (!_print_ascii_file("/proc/cpuinfo", st)) {
st->print(" <Not Available>");
}
st->cr();
+ // Nothing to do for now.
}
void os::print_memory_info(outputStream* st) {
st->print("Memory:");
st->print(" %dk page", os::vm_page_size()>>10);
+ #ifndef _ALLBSD_SOURCE
// values in struct sysinfo are "unsigned long"
struct sysinfo si;
sysinfo(&si);
+ #endif
st->print(", physical " UINT64_FORMAT "k",
os::physical_memory() >> 10);
st->print("(" UINT64_FORMAT "k free)",
os::available_memory() >> 10);
+ #ifndef _ALLBSD_SOURCE
st->print(", swap " UINT64_FORMAT "k",
((jlong)si.totalswap * si.mem_unit) >> 10);
st->print("(" UINT64_FORMAT "k free)",
((jlong)si.freeswap * si.mem_unit) >> 10);
+ #endif
st->cr();
+
+ // meminfo
+ st->print("\n/proc/meminfo:\n");
+ _print_ascii_file("/proc/meminfo", st);
+ st->cr();
}
// Taken from /usr/include/bits/siginfo.h Supposed to be architecture specific
! // but they're the same for all the linux arch that we support
! // but they're the same for all the bsd arch that we support
// and they're the same for solaris but there's no common place to put this.
const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR",
"ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG",
"ILL_COPROC", "ILL_BADSTK" };
*** 2375,2398 ****
--- 2570,2603 ----
}
// a counter for each possible signal value
static volatile jint pending_signals[NSIG+1] = { 0 };
! // Linux(POSIX) specific hand shaking semaphore.
! // Bsd(POSIX) specific hand shaking semaphore.
+ #ifdef __APPLE__
+ static semaphore_t sig_sem;
+ #define SEM_INIT(sem, value) semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, value)
+ #define SEM_WAIT(sem) semaphore_wait(sem);
+ #define SEM_POST(sem) semaphore_signal(sem);
+ #else
static sem_t sig_sem;
+ #define SEM_INIT(sem, value) sem_init(&sem, 0, value)
+ #define SEM_WAIT(sem) sem_wait(&sem);
+ #define SEM_POST(sem) sem_post(&sem);
+ #endif
void os::signal_init_pd() {
// Initialize signal structures
::memset((void*)pending_signals, 0, sizeof(pending_signals));
// Initialize signal semaphore
! ::sem_init(&sig_sem, 0, 0);
! ::SEM_INIT(sig_sem, 0);
}
void os::signal_notify(int sig) {
Atomic::inc(&pending_signals[sig]);
! ::sem_post(&sig_sem);
! ::SEM_POST(sig_sem);
}
static int check_pending_signals(bool wait) {
Atomic::store(0, &sigint_count);
for (;;) {
*** 2410,2420 ****
--- 2615,2625 ----
bool threadIsSuspended;
do {
thread->set_suspend_equivalent();
// cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
! ::sem_wait(&sig_sem);
! ::SEM_WAIT(sig_sem);
// were we externally suspended while we were waiting?
threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
if (threadIsSuspended) {
//
*** 2421,2431 ****
--- 2626,2636 ----
// The semaphore has been incremented, but while we were waiting
// another thread suspended us. We don't want to continue running
// while suspended because that would surprise the thread that
// suspended us.
//
! ::sem_post(&sig_sem);
! ::SEM_POST(sig_sem);
thread->java_suspend_self();
}
} while (threadIsSuspended);
}
*** 2442,2478 ****
--- 2647,2683 ----
////////////////////////////////////////////////////////////////////////////////
// Virtual Memory
int os::vm_page_size() {
// Seems redundant as all get out
! assert(os::Linux::page_size() != -1, "must call os::init");
! return os::Linux::page_size();
! assert(os::Bsd::page_size() != -1, "must call os::init");
! return os::Bsd::page_size();
}
// Solaris allocates memory by pages.
int os::vm_allocation_granularity() {
! assert(os::Linux::page_size() != -1, "must call os::init");
! return os::Linux::page_size();
! assert(os::Bsd::page_size() != -1, "must call os::init");
! return os::Bsd::page_size();
}
// Rationale behind this function:
// current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable
// mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get
// samples for JITted code. Here we create private executable mapping over the code cache
// and then we can use standard (well, almost, as mapping can change) way to provide
// info for the reporting script by storing timestamp and location of symbol
! void linux_wrap_code(char* base, size_t size) {
! void bsd_wrap_code(char* base, size_t size) {
static volatile jint cnt = 0;
if (!UseOprofile) {
return;
}
! char buf[PATH_MAX + 1];
int num = Atomic::add(1, &cnt);
! snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
! snprintf(buf, PATH_MAX + 1, "%s/hs-vm-%d-%d",
os::get_temp_directory(), os::current_process_id(), num);
unlink(buf);
int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);
*** 2488,2591 ****
--- 2693,2782 ----
::close(fd);
unlink(buf);
}
}
! // NOTE: Linux kernel does not really reserve the pages for us.
! // NOTE: Bsd kernel does not really reserve the pages for us.
// All it does is to check if there are enough free pages
// left at the time of mmap(). This could be a potential
// problem.
bool os::commit_memory(char* addr, size_t size, bool exec) {
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
+ #ifdef __OpenBSD__
+ // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
+ return ::mprotect(addr, size, prot) == 0;
+ #else
uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
if (res != (uintptr_t) MAP_FAILED) {
if (UseNUMAInterleaving) {
numa_make_global(addr, size);
}
return true;
}
return false;
+ return res != (uintptr_t) MAP_FAILED;
+ #endif
}
+ #ifndef _ALLBSD_SOURCE
// Define MAP_HUGETLB here so we can build HotSpot on old systems.
#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000
#endif
// Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif
+ #endif
bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
bool exec) {
+ #ifndef _ALLBSD_SOURCE
if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
uintptr_t res =
(uintptr_t) ::mmap(addr, size, prot,
MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
-1, 0);
if (res != (uintptr_t) MAP_FAILED) {
if (UseNUMAInterleaving) {
numa_make_global(addr, size);
+ return res != (uintptr_t) MAP_FAILED;
}
return true;
}
// Fall through and try to use small pages
}
+ #endif
if (commit_memory(addr, size, exec)) {
realign_memory(addr, size, alignment_hint);
return true;
}
return false;
+ return commit_memory(addr, size, exec);
}
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
+ #ifndef _ALLBSD_SOURCE
if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
// We don't check the return value: madvise(MADV_HUGEPAGE) may not
// be supported or the memory may already be backed by huge pages.
::madvise(addr, bytes, MADV_HUGEPAGE);
}
+ #endif
}
void os::free_memory(char *addr, size_t bytes) {
! commit_memory(addr, bytes, false);
! ::madvise(addr, bytes, MADV_DONTNEED);
}
void os::numa_make_global(char *addr, size_t bytes) {
Linux::numa_interleave_memory(addr, bytes);
}
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
}
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() {
! int max_node = Linux::numa_max_node();
return max_node > 0 ? max_node + 1 : 1;
! return 1;
}
int os::numa_get_group_id() {
int cpu_id = Linux::sched_getcpu();
if (cpu_id != -1) {
int lgrp_id = Linux::get_node_by_cpu(cpu_id);
if (lgrp_id != -1) {
return lgrp_id;
}
}
return 0;
}
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
! for (size_t i = 0; i < size; i++) {
! ids[i] = i;
! if (size > 0) {
! ids[0] = 0;
+ return 1;
}
! return size;
! return 0;
}
bool os::get_page_info(char *start, page_info* info) {
return false;
}
*** 2592,2626 ****
--- 2783,2793 ----
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
return end;
}
int os::Linux::sched_getcpu_syscall(void) {
unsigned int cpu;
int retval = -1;
#if defined(IA32)
# ifndef SYS_getcpu
# define SYS_getcpu 318
# endif
retval = syscall(SYS_getcpu, &cpu, NULL, NULL);
#elif defined(AMD64)
// Unfortunately we have to bring all these macros here from vsyscall.h
// to be able to compile on old linuxes.
# define __NR_vgetcpu 2
# define VSYSCALL_START (-10UL << 20)
# define VSYSCALL_SIZE 1024
# define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);
vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
retval = vgetcpu(&cpu, NULL, NULL);
#endif
return (retval == -1) ? retval : cpu;
}
+ #ifndef _ALLBSD_SOURCE
// Something to do with the numa-aware allocator needs these symbols
extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
extern "C" JNIEXPORT void numa_error(char *where) { }
extern "C" JNIEXPORT int fork1() { return fork(); }
*** 2627,2653 ****
--- 2794,2816 ----
// If we are running with libnuma version > 2, then we should
// be trying to use symbols with versions 1.1
// If we are running with earlier version, which did not have symbol versions,
// we should use the base version.
! void* os::Linux::libnuma_dlsym(void* handle, const char *name) {
! void* os::Bsd::libnuma_dlsym(void* handle, const char *name) {
void *f = dlvsym(handle, name, "libnuma_1.1");
if (f == NULL) {
f = dlsym(handle, name);
}
return f;
}
! bool os::Linux::libnuma_init() {
! bool os::Bsd::libnuma_init() {
// sched_getcpu() should be in libc.
set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
dlsym(RTLD_DEFAULT, "sched_getcpu")));
// If it's not, try a direct syscall.
if (sched_getcpu() == -1)
set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, (void*)&sched_getcpu_syscall));
if (sched_getcpu() != -1) { // Does it work?
void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
if (handle != NULL) {
set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
libnuma_dlsym(handle, "numa_node_to_cpus")));
*** 2673,2683 ****
--- 2836,2846 ----
return false;
}
// rebuild_cpu_to_node_map() constructs a table mapping cpud id to node id.
// The table is later used in get_node_by_cpu().
! void os::Linux::rebuild_cpu_to_node_map() {
! void os::Bsd::rebuild_cpu_to_node_map() {
const size_t NCPUS = 32768; // Since the buffer size computation is very obscure
// in libnuma (possible values are starting from 16,
// and continuing up with every other power of 2, but less
// than the maximum number of CPUs supported by kernel), and
// is a subject to change (in libnuma version 2 the requirements
*** 2709,2843 ****
--- 2872,2934 ----
}
}
FREE_C_HEAP_ARRAY(unsigned long, cpu_map);
}
! int os::Linux::get_node_by_cpu(int cpu_id) {
! int os::Bsd::get_node_by_cpu(int cpu_id) {
if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
return cpu_to_node()->at(cpu_id);
}
return -1;
}
! GrowableArray<int>* os::Linux::_cpu_to_node;
! os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
! os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
! os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
! os::Linux::numa_available_func_t os::Linux::_numa_available;
! os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
! os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
! unsigned long* os::Linux::_numa_all_nodes;
! GrowableArray<int>* os::Bsd::_cpu_to_node;
! os::Bsd::sched_getcpu_func_t os::Bsd::_sched_getcpu;
! os::Bsd::numa_node_to_cpus_func_t os::Bsd::_numa_node_to_cpus;
! os::Bsd::numa_max_node_func_t os::Bsd::_numa_max_node;
! os::Bsd::numa_available_func_t os::Bsd::_numa_available;
! os::Bsd::numa_tonode_memory_func_t os::Bsd::_numa_tonode_memory;
! os::Bsd::numa_interleave_memory_func_t os::Bsd::_numa_interleave_memory;
! unsigned long* os::Bsd::_numa_all_nodes;
+ #endif
bool os::uncommit_memory(char* addr, size_t size) {
+ #ifdef __OpenBSD__
+ // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
+ return ::mprotect(addr, size, PROT_NONE) == 0;
+ #else
uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
return res != (uintptr_t) MAP_FAILED;
+ #endif
}
// Linux uses a growable mapping for the stack, and if the mapping for
// the stack guard pages is not removed when we detach a thread the
// stack cannot grow beyond the pages where the stack guard was
// mapped. If at some point later in the process the stack expands to
// that point, the Linux kernel cannot expand the stack any further
// because the guard pages are in the way, and a segfault occurs.
//
// However, it's essential not to split the stack region by unmapping
// a region (leaving a hole) that's already part of the stack mapping,
// so if the stack mapping has already grown beyond the guard pages at
// the time we create them, we have to truncate the stack mapping.
// So, we need to know the extent of the stack mapping when
// create_stack_guard_pages() is called.
// Find the bounds of the stack mapping. Return true for success.
//
// We only need this for stacks that are growable: at the time of
// writing thread stacks don't use growable mappings (i.e. those
// creeated with MAP_GROWSDOWN), and aren't marked "[stack]", so this
// only applies to the main thread.
static
bool get_stack_bounds(uintptr_t *bottom, uintptr_t *top) {
char buf[128];
int fd, sz;
if ((fd = ::open("/proc/self/maps", O_RDONLY)) < 0) {
return false;
}
const char kw[] = "[stack]";
const int kwlen = sizeof(kw)-1;
// Address part of /proc/self/maps couldn't be more than 128 bytes
while ((sz = os::get_line_chars(fd, buf, sizeof(buf))) > 0) {
if (sz > kwlen && ::memcmp(buf+sz-kwlen, kw, kwlen) == 0) {
// Extract addresses
if (sscanf(buf, "%" SCNxPTR "-%" SCNxPTR, bottom, top) == 2) {
uintptr_t sp = (uintptr_t) __builtin_frame_address(0);
if (sp >= *bottom && sp <= *top) {
::close(fd);
return true;
}
}
}
}
::close(fd);
return false;
}
// If the (growable) stack mapping already extends beyond the point
// where we're going to put our guard pages, truncate the mapping at
// that point by munmap()ping it. This ensures that when we later
// munmap() the guard pages we don't leave a hole in the stack
// mapping. This only affects the main/initial thread, but guard
// against future OS changes
bool os::create_stack_guard_pages(char* addr, size_t size) {
uintptr_t stack_extent, stack_base;
bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true);
if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) {
assert(os::Linux::is_initial_thread(),
"growable stack in non-initial thread");
if (stack_extent < (uintptr_t)addr)
::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent);
}
return os::commit_memory(addr, size);
}
// If this is a growable mapping, remove the guard pages entirely by
- // munmap()ping them. If not, just call uncommit_memory(). This only
// affects the main/initial thread, but guard against future OS changes
bool os::remove_stack_guard_pages(char* addr, size_t size) {
uintptr_t stack_extent, stack_base;
bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true);
if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) {
assert(os::Linux::is_initial_thread(),
"growable stack in non-initial thread");
return ::munmap(addr, size) == 0;
}
return os::uncommit_memory(addr, size);
}
static address _highest_vm_reserved_address = NULL;
// If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
// at 'requested_addr'. If there are existing memory mappings at the same
// location, however, they will be overwritten. If 'fixed' is false,
// 'requested_addr' is only treated as a hint, the return value may or
! // may not start from the requested address. Unlike Linux mmap(), this
! // may not start from the requested address. Unlike Bsd mmap(), this
// function returns NULL to indicate failure.
static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
char * addr;
int flags;
flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
if (fixed) {
! assert((uintptr_t)requested_addr % os::Linux::page_size() == 0, "unaligned address");
! assert((uintptr_t)requested_addr % os::Bsd::page_size() == 0, "unaligned address");
flags |= MAP_FIXED;
}
// Map uncommitted pages PROT_READ and PROT_WRITE, change access
// to PROT_EXEC if executable when we commit the page.
*** 2876,2897 ****
--- 2967,2988 ----
static address highest_vm_reserved_address() {
return _highest_vm_reserved_address;
}
! static bool linux_mprotect(char* addr, size_t size, int prot) {
! // Linux wants the mprotect address argument to be page aligned.
! char* bottom = (char*)align_size_down((intptr_t)addr, os::Linux::page_size());
! static bool bsd_mprotect(char* addr, size_t size, int prot) {
! // Bsd wants the mprotect address argument to be page aligned.
! char* bottom = (char*)align_size_down((intptr_t)addr, os::Bsd::page_size());
// According to SUSv3, mprotect() should only be used with mappings
// established by mmap(), and mmap() always maps whole pages. Unaligned
// 'addr' likely indicates problem in the VM (e.g. trying to change
// protection of malloc'ed or statically allocated memory). Check the
// caller if you hit this assert.
assert(addr == bottom, "sanity check");
! size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
! size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Bsd::page_size());
return ::mprotect(bottom, size, prot) == 0;
}
// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
*** 2904,2926 ****
--- 2995,3018 ----
case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
default:
ShouldNotReachHere();
}
// is_committed is unused.
! return linux_mprotect(addr, bytes, p);
! return bsd_mprotect(addr, bytes, p);
}
bool os::guard_memory(char* addr, size_t size) {
! return linux_mprotect(addr, size, PROT_NONE);
! return bsd_mprotect(addr, size, PROT_NONE);
}
bool os::unguard_memory(char* addr, size_t size) {
! return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
! return bsd_mprotect(addr, size, PROT_READ|PROT_WRITE);
}
! bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
! bool os::Bsd::hugetlbfs_sanity_check(bool warn, size_t page_size) {
bool result = false;
+ #ifndef _ALLBSD_SOURCE
void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
-1, 0);
if (p != (void *) -1) {
*** 2948,2957 ****
--- 3040,3050 ----
}
if (warn) {
warning("HugeTLBFS is not supported by the operating system.");
}
+ #endif
return result;
}
/*
*** 2994,3003 ****
--- 3087,3097 ----
// Large page support
static size_t _large_page_size = 0;
void os::large_page_init() {
+ #ifndef _ALLBSD_SOURCE
if (!UseLargePages) {
UseHugeTLBFS = false;
UseSHM = false;
return;
}
*** 3013,3023 ****
--- 3107,3117 ----
}
if (LargePageSizeInBytes) {
_large_page_size = LargePageSizeInBytes;
} else {
! // large_page_size on Linux is used to round up heap size. x86 uses either
! // large_page_size on Bsd is used to round up heap size. x86 uses either
// 2M or 4M page, depending on whether PAE (Physical Address Extensions)
// mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
// page as large as 256M.
//
// Here we try to figure out page size by parsing /proc/meminfo and looking
*** 3056,3085 ****
--- 3150,3182 ----
}
// print a warning if any large page related flag is specified on command line
bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
! const size_t default_page_size = (size_t)Linux::page_size();
! const size_t default_page_size = (size_t)Bsd::page_size();
if (_large_page_size > default_page_size) {
_page_sizes[0] = _large_page_size;
_page_sizes[1] = default_page_size;
_page_sizes[2] = 0;
}
UseHugeTLBFS = UseHugeTLBFS &&
! Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
! Bsd::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
if (UseHugeTLBFS)
UseSHM = false;
UseLargePages = UseHugeTLBFS || UseSHM;
set_coredump_filter();
+ #endif
}
+ #ifndef _ALLBSD_SOURCE
#ifndef SHM_HUGETLB
#define SHM_HUGETLB 04000
#endif
+ #endif
char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
// "exec" is passed in but not used. Creating the shared image for
// the code cache doesn't have an SHM_X executable permission to check.
assert(UseLargePages && UseSHM, "only for SHM large pages");
*** 3093,3113 ****
--- 3190,3214 ----
);
char msg[128];
// Create a large shared memory region to attach to based on size.
// Currently, size is the total size of the heap
+ #ifndef _ALLBSD_SOURCE
int shmid = shmget(key, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
+ #else
+ int shmid = shmget(key, bytes, IPC_CREAT|SHM_R|SHM_W);
+ #endif
if (shmid == -1) {
// Possible reasons for shmget failure:
// 1. shmmax is too small for Java heap.
// > check shmmax value: cat /proc/sys/kernel/shmmax
// > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
// 2. not enough large page memory.
// > check available large pages: cat /proc/meminfo
// > increase amount of large pages:
// echo new_value > /proc/sys/vm/nr_hugepages
! // Note 1: different Linux may use different name for this property,
! // Note 1: different Bsd may use different name for this property,
// e.g. on Redhat AS-3 it is "hugetlb_pool".
// Note 2: it's possible there's enough physical memory available but
// they are so fragmented after a long run that they can't
// coalesce into large pages. Try to reserve large pages when
// the system is still "fresh".
*** 3134,3147 ****
--- 3235,3244 ----
warning(msg);
}
return NULL;
}
if ((addr != NULL) && UseNUMAInterleaving) {
numa_make_global(addr, bytes);
}
return addr;
}
bool os::release_memory_special(char* base, size_t bytes) {
// detaching the SHM segment will also delete it, see reserve_memory_special()
*** 3183,3199 ****
--- 3280,3296 ----
// Repeatedly allocate blocks until the block is allocated at the
// right spot. Give up after max_tries. Note that reserve_memory() will
// automatically update _highest_vm_reserved_address if the call is
// successful. The variable tracks the highest memory address every reserved
// by JVM. It is used to detect heap-stack collision if running with
! // fixed-stack LinuxThreads. Because here we may attempt to reserve more
! // fixed-stack BsdThreads. Because here we may attempt to reserve more
// space than needed, it could confuse the collision detecting code. To
// solve the problem, save current _highest_vm_reserved_address and
// calculate the correct value before return.
address old_highest = _highest_vm_reserved_address;
! // Linux mmap allows caller to pass an address as hint; give it a try first,
! // Bsd mmap allows caller to pass an address as hint; give it a try first,
// if kernel honors the hint then we can return immediately.
char * addr = anon_mmap(requested_addr, bytes, false);
if (addr == requested_addr) {
return requested_addr;
}
*** 3250,3264 ****
--- 3347,3361 ----
return NULL;
}
}
// Read up to nBytes from fd into buf, restarting the syscall if it is
// interrupted by a signal (EINTR). RESTARTABLE_RETURN_INT supplies the return.
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  RESTARTABLE_RETURN_INT(::read(fd, buf, nBytes));
}
! // TODO-FIXME: reconcile Solaris' os::sleep with the linux variation.
! // Solaris uses poll(), linux uses park().
! // TODO-FIXME: reconcile Solaris' os::sleep with the bsd variation.
! // Solaris uses poll(), bsd uses park().
// Poll() is likely a better choice, assuming that Thread.interrupt()
// generates a SIGUSRx signal. Note that SIGUSR1 can interfere with
// SIGSEGV, see 4355769.
const int NANOSECS_PER_MILLISECS = 1000000;
*** 3281,3291 ****
--- 3378,3388 ----
jlong newtime = javaTimeNanos();
if (newtime - prevtime < 0) {
// time moving backwards, should only happen if no monotonic clock
// not a guarantee() because JVM should not abort on kernel/glibc bugs
! assert(!Linux::supports_monotonic_clock(), "time moving backwards");
! assert(!Bsd::supports_monotonic_clock(), "time moving backwards");
} else {
millis -= (newtime - prevtime) / NANOSECS_PER_MILLISECS;
}
if(millis <= 0) {
*** 3320,3330 ****
--- 3417,3427 ----
jlong newtime = javaTimeNanos();
if (newtime - prevtime < 0) {
// time moving backwards, should only happen if no monotonic clock
// not a guarantee() because JVM should not abort on kernel/glibc bugs
! assert(!Linux::supports_monotonic_clock(), "time moving backwards");
! assert(!Bsd::supports_monotonic_clock(), "time moving backwards");
} else {
millis -= (newtime - prevtime) / NANOSECS_PER_MILLISECS;
}
if(millis <= 0) break ;
*** 3359,3369 ****
--- 3456,3466 ----
// Bare scheduler yield with no JVM bookkeeping; the effect of sched_yield()
// is not observable here, so the result is reported as unknown.
os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN ;}
// Yield to all runnable threads, including lower-priority ones.
// 'attempts' is unused on this platform.
void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  // Threads on Bsd are all with same priority. The Solaris style
  // os::yield_all() with nanosleep(1ms) is not necessary.
  sched_yield();
}
// Called from the tight loops to possibly influence time-sharing heuristics
*** 3372,3384 ****
--- 3469,3481 ----
}
////////////////////////////////////////////////////////////////////////////////
// thread priority support
! // Note: Normal Linux applications are run with SCHED_OTHER policy. SCHED_OTHER
! // Note: Normal Bsd applications are run with SCHED_OTHER policy. SCHED_OTHER
// only supports dynamic priority, static priority must be zero. For real-time
! // applications, Linux supports SCHED_RR which allows static priority (1-99).
! // applications, Bsd supports SCHED_RR which allows static priority (1-99).
// However, for large multi-threaded applications, SCHED_RR is not only slower
// than SCHED_OTHER, but also very unstable (my volano tests hang hard 4 out
// of 5 runs - Sep 2005).
//
// The following code actually changes the niceness of kernel-thread/LWP. It
*** 3386,3398 ****
--- 3483,3533 ----
// not the entire user process, and user level threads are 1:1 mapped to kernel
// threads. It has always been the case, but could change in the future. For
// this reason, the code should not be used as default (ThreadPriorityPolicy=0).
// It is only used when ThreadPriorityPolicy=1 and requires root privilege.
+ #if defined(_ALLBSD_SOURCE) && !defined(__APPLE__)
int os::java_to_os_priority[MaxPriority + 1] = {
19, // 0 Entry should never be used
+ 0, // 1 MinPriority
+ 3, // 2
+ 6, // 3
+
+ 10, // 4
+ 15, // 5 NormPriority
+ 18, // 6
+
+ 21, // 7
+ 25, // 8
+ 28, // 9 NearMaxPriority
+
+ 31 // 10 MaxPriority
+ };
+ #elif defined(__APPLE__)
+ /* Using Mach high-level priority assignments */
+ int os::java_to_os_priority[MaxPriority + 1] = {
+ 0, // 0 Entry should never be used (MINPRI_USER)
+
+ 27, // 1 MinPriority
+ 28, // 2
+ 29, // 3
+
+ 30, // 4
+ 31, // 5 NormPriority (BASEPRI_DEFAULT)
+ 32, // 6
+
+ 33, // 7
+ 34, // 8
+ 35, // 9 NearMaxPriority
+
+ 36 // 10 MaxPriority
+ };
+ #else
+ int os::java_to_os_priority[MaxPriority + 1] = {
+ 19, // 0 Entry should never be used
+
4, // 1 MinPriority
3, // 2
2, // 3
1, // 4
*** 3403,3421 ****
--- 3538,3557 ----
-3, // 8
-4, // 9 NearMaxPriority
-5 // 10 MaxPriority
};
+ #endif
// One-time priority-policy sanity check: ThreadPriorityPolicy=1 needs root.
// Downgrades the policy to 0 (warning only if the flag was set explicitly)
// when the effective uid is not root. Always returns 0.
static int prio_init() {
  if (ThreadPriorityPolicy == 1) {
    // Only root can raise thread priority. Don't allow ThreadPriorityPolicy=1
    // if effective uid is not root. Perhaps, a more elegant way of doing
    // this is to test CAP_SYS_NICE capability, but that will require libcap.so
    if (geteuid() != 0) {
      if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy)) {
        warning("-XX:ThreadPriorityPolicy requires root privilege on Bsd");
      }
      ThreadPriorityPolicy = 0;
    }
  }
  return 0;
}
// Set the OS-level priority of 'thread' to 'newpri' (already mapped from the
// Java priority). No-op unless UseThreadPriorities and a non-zero policy.
OSReturn os::set_native_priority(Thread* thread, int newpri) {
  if ( !UseThreadPriorities || ThreadPriorityPolicy == 0 ) return OS_OK;

#ifdef __OpenBSD__
  // OpenBSD pthread_setprio starves low priority threads
  return OS_OK;
#elif defined(__FreeBSD__)
  int ret = pthread_setprio(thread->osthread()->pthread_id(), newpri);
  // NOTE(review): give this branch its own return — previously control could
  // fall off the end of the function here.
  return (ret == 0) ? OS_OK : OS_ERR;
#elif defined(__APPLE__) || defined(__NetBSD__)
  struct sched_param sp;
  int policy;
  pthread_t self = pthread_self();

  if (pthread_getschedparam(self, &policy, &sp) != 0)
    return OS_ERR;

  sp.sched_priority = newpri;
  if (pthread_setschedparam(self, policy, &sp) != 0)
    return OS_ERR;

  return OS_OK;
#else
  int ret = setpriority(PRIO_PROCESS, thread->osthread()->thread_id(), newpri);
  return (ret == 0) ? OS_OK : OS_ERR;
#endif
}
// Retrieve the OS-level priority of 'thread' into *priority_ptr.
// When priorities are disabled, reports the mapped NormPriority value.
OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  if ( !UseThreadPriorities || ThreadPriorityPolicy == 0 ) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }

  errno = 0;
#if defined(__OpenBSD__) || defined(__FreeBSD__)
  *priority_ptr = pthread_getprio(thread->osthread()->pthread_id());
#elif defined(__APPLE__) || defined(__NetBSD__)
  // NOTE(review): queries the calling thread, not 'thread' — pthread_self()
  // here mirrors set_native_priority above; confirm this is intentional.
  int policy;
  struct sched_param sp;

  pthread_getschedparam(pthread_self(), &policy, &sp);
  *priority_ptr = sp.sched_priority;
#else
  *priority_ptr = getpriority(PRIO_PROCESS, thread->osthread()->thread_id());
#endif
  // getpriority() can legitimately return -1; disambiguate via errno.
  return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR);
}
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
*** 3543,3553 ****
--- 3709,3719 ----
struct sigaction act;
char *s;
/* Get signal number to use for suspend/resume */
if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
int sig = ::strtol(s, 0, 10);
- if (sig > 0 || sig < _NSIG) {
SR_signum = sig;
}
}
assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
*** 3560,3580 ****
--- 3726,3746 ----
act.sa_flags = SA_RESTART|SA_SIGINFO;
act.sa_handler = (void (*)(int)) SR_handler;
// SR_signum is blocked by default.
// 4528190 - We also need to block pthread restart signal (32 on all
! // supported Linux platforms). Note that LinuxThreads need to block
! // supported Bsd platforms). Note that BsdThreads need to block
// this signal for all threads to work properly. So we don't have
// to use hard-coded signal number when setting up the mask.
pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
if (sigaction(SR_signum, &act, 0) == -1) {
return -1;
}
// Save signal flag
! os::Linux::set_our_sigflags(SR_signum, act.sa_flags);
! os::Bsd::set_our_sigflags(SR_signum, act.sa_flags);
return 0;
}
static int SR_finalize() {
return 0;
*** 3689,3719 ****
--- 3855,3885 ----
//
// Note that the VM will print warnings if it detects conflicting signal
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
//
// Platform-specific signal dispatch, implemented in os_bsd_<cpu>.cpp.
extern "C" JNIEXPORT int
JVM_handle_bsd_signal(int signo, siginfo_t* siginfo,
                      void* ucontext, int abort_if_unrecognized);

// sigaction entry point for VM-handled signals; forwards to the platform
// handler with abort_if_unrecognized = true.
void signalHandler(int sig, siginfo_t* info, void* uc) {
  assert(info != NULL && uc != NULL, "it must be old kernel");
  JVM_handle_bsd_signal(sig, info, uc, true);
}
// This boolean allows users to forward their own non-matching signals
! // to JVM_handle_linux_signal, harmlessly.
! bool os::Linux::signal_handlers_are_installed = false;
! // to JVM_handle_bsd_signal, harmlessly.
! bool os::Bsd::signal_handlers_are_installed = false;
// For signal-chaining
! struct sigaction os::Linux::sigact[MAXSIGNUM];
! unsigned int os::Linux::sigs = 0;
! bool os::Linux::libjsig_is_loaded = false;
! struct sigaction os::Bsd::sigact[MAXSIGNUM];
! unsigned int os::Bsd::sigs = 0;
! bool os::Bsd::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
! get_signal_t os::Linux::get_signal_action = NULL;
! get_signal_t os::Bsd::get_signal_action = NULL;
! struct sigaction* os::Linux::get_chained_signal_action(int sig) {
! struct sigaction* os::Bsd::get_chained_signal_action(int sig) {
struct sigaction *actp = NULL;
if (libjsig_is_loaded) {
// Retrieve the old signal handler from libjsig
actp = (*get_signal_action)(sig);
*** 3769,3779 ****
--- 3935,3945 ----
}
// Tell jvm's signal handler the signal is taken care of.
return true;
}
! bool os::Linux::chained_handler(int sig, siginfo_t* siginfo, void* context) {
! bool os::Bsd::chained_handler(int sig, siginfo_t* siginfo, void* context) {
bool chained = false;
// signal-chaining
if (UseSignalChaining) {
struct sigaction *actp = get_chained_signal_action(sig);
if (actp != NULL) {
*** 3781,3817 ****
--- 3947,3983 ----
}
}
return chained;
}
! struct sigaction* os::Linux::get_preinstalled_handler(int sig) {
! struct sigaction* os::Bsd::get_preinstalled_handler(int sig) {
if ((( (unsigned int)1 << sig ) & sigs) != 0) {
return &sigact[sig];
}
return NULL;
}
! void os::Linux::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
! void os::Bsd::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
sigact[sig] = oldAct;
sigs |= (unsigned int)1 << sig;
}
// for diagnostic
! int os::Linux::sigflags[MAXSIGNUM];
! int os::Bsd::sigflags[MAXSIGNUM];
! int os::Linux::get_our_sigflags(int sig) {
! int os::Bsd::get_our_sigflags(int sig) {
assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
return sigflags[sig];
}
! void os::Linux::set_our_sigflags(int sig, int flags) {
! void os::Bsd::set_our_sigflags(int sig, int flags) {
assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
sigflags[sig] = flags;
}
! void os::Linux::set_signal_handler(int sig, bool set_installed) {
! void os::Bsd::set_signal_handler(int sig, bool set_installed) {
// Check for overwrite.
struct sigaction oldAct;
sigaction(sig, (struct sigaction*)NULL, &oldAct);
void* oldhand = oldAct.sa_sigaction
*** 3857,3867 ****
--- 4023,4033 ----
}
// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
! void os::Linux::install_signal_handlers() {
! void os::Bsd::install_signal_handlers() {
if (!signal_handlers_are_installed) {
signal_handlers_are_installed = true;
// signal-chaining
typedef void (*signal_setting_t)();
*** 3887,3896 ****
--- 4053,4084 ----
set_signal_handler(SIGBUS, true);
set_signal_handler(SIGILL, true);
set_signal_handler(SIGFPE, true);
set_signal_handler(SIGXFSZ, true);
+ #if defined(__APPLE__)
+ // In Mac OS X 10.4, CrashReporter will write a crash log for all 'fatal' signals, including
+ // signals caught and handled by the JVM. To work around this, we reset the mach task
+ // signal handler that's placed on our process by CrashReporter. This disables
+ // CrashReporter-based reporting.
+ //
+ // This work-around is not necessary for 10.5+, as CrashReporter no longer intercedes
+ // on caught fatal signals.
+ //
+ // Additionally, gdb installs both standard BSD signal handlers, and mach exception
+ // handlers. By replacing the existing task exception handler, we disable gdb's mach
+ // exception handling, while leaving the standard BSD signal handlers functional.
+ kern_return_t kr;
+ kr = task_set_exception_ports(mach_task_self(),
+ EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC,
+ MACH_PORT_NULL,
+ EXCEPTION_STATE_IDENTITY,
+ MACHINE_THREAD_STATE);
+
+ assert(kr == KERN_SUCCESS, "could not set mach task signal handler");
+ #endif
+
if (libjsig_is_loaded) {
// Tell libjsig jvm finishes setting signal handlers
(*end_signal_setting)();
}
*** 3907,3933 ****
--- 4095,4123 ----
}
}
}
}
// This is the fastest way to get thread cpu time on Linux.
+ #ifndef _ALLBSD_SOURCE
+ // This is the fastest way to get thread cpu time on Bsd.
// Returns cpu time (user+sys) for any thread, not only for current.
// POSIX compliant clocks are implemented in the kernels 2.6.16+.
// It might work on 2.6.10+ with a special kernel/glibc patch.
// For reference, please, see IEEE Std 1003.1-2004:
// http://www.unix.org/single_unix_specification
! jlong os::Linux::fast_thread_cpu_time(clockid_t clockid) {
! jlong os::Bsd::fast_thread_cpu_time(clockid_t clockid) {
struct timespec tp;
! int rc = os::Linux::clock_gettime(clockid, &tp);
! int rc = os::Bsd::clock_gettime(clockid, &tp);
assert(rc == 0, "clock_gettime is expected to return 0 code");
return (tp.tv_sec * SEC_IN_NANOSECS) + tp.tv_nsec;
}
+ #endif
/////
! // glibc on Linux platform uses non-documented flag
! // glibc on Bsd platform uses non-documented flag
// to indicate, that some special sort of signal
// trampoline is used.
// We will never set this flag, and we should
// ignore this flag in our diagnostic
#ifdef SIGNIFICANT_SIGNAL_MASK
*** 3989,4011 ****
--- 4179,4201 ----
// Check: is it our handler?
if(handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
// It is our signal handler
// check for flags, reset system-used one!
! if((int)sa.sa_flags != os::Linux::get_our_sigflags(sig)) {
! if((int)sa.sa_flags != os::Bsd::get_our_sigflags(sig)) {
st->print(
", flags was changed from " PTR32_FORMAT ", consider using jsig library",
! os::Linux::get_our_sigflags(sig));
! os::Bsd::get_our_sigflags(sig));
}
}
st->cr();
}
#define DO_SIGNAL_CHECK(sig) \
if (!sigismember(&check_signal_done, sig)) \
! os::Linux::check_signal_handler(sig)
! os::Bsd::check_signal_handler(sig)
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here
void os::run_periodic_checks() {
*** 4039,4049 ****
--- 4229,4239 ----
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
static os_sigaction_t os_sigaction = NULL;
! void os::Linux::check_signal_handler(int sig) {
! void os::Bsd::check_signal_handler(int sig) {
char buf[O_BUFLEN];
address jvmHandler = NULL;
struct sigaction act;
*** 4097,4109 ****
--- 4287,4299 ----
tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
// No need to check this sig any longer
sigaddset(&check_signal_done, sig);
! } else if(os::Linux::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Linux::get_our_sigflags(sig)) {
! } else if(os::Bsd::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Bsd::get_our_sigflags(sig)) {
tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
! tty->print("expected:" PTR32_FORMAT, os::Linux::get_our_sigflags(sig));
! tty->print("expected:" PTR32_FORMAT, os::Bsd::get_our_sigflags(sig));
tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags);
// No need to check this sig any longer
sigaddset(&check_signal_done, sig);
}
*** 4132,4172 ****
--- 4322,4371 ----
// this is called _before_ the most of global arguments have been parsed
void os::init(void) {
char dummy; /* used to get a guess on initial stack address */
// first_hrtime = gethrtime();
! // With LinuxThreads the JavaMain thread pid (primordial thread)
! // With BsdThreads the JavaMain thread pid (primordial thread)
// is different than the pid of the java launcher thread.
! // So, on Linux, the launcher thread pid is passed to the VM
! // So, on Bsd, the launcher thread pid is passed to the VM
// via the sun.java.launcher.pid property.
// Use this property instead of getpid() if it was correctly passed.
// See bug 6351349.
pid_t java_launcher_pid = (pid_t) Arguments::sun_java_launcher_pid();
_initial_pid = (java_launcher_pid > 0) ? java_launcher_pid : getpid();
! clock_tics_per_sec = sysconf(_SC_CLK_TCK);
init_random(1234567);
ThreadCritical::initialize();
! Linux::set_page_size(sysconf(_SC_PAGESIZE));
! if (Linux::page_size() == -1) {
! fatal(err_msg("os_linux.cpp: os::init: sysconf failed (%s)",
! Bsd::set_page_size(getpagesize());
! if (Bsd::page_size() == -1) {
! fatal(err_msg("os_bsd.cpp: os::init: sysconf failed (%s)",
strerror(errno)));
}
! init_page_sizes((size_t) Linux::page_size());
! init_page_sizes((size_t) Bsd::page_size());
! Linux::initialize_system_info();
! Bsd::initialize_system_info();
// main_thread points to the aboriginal thread
! Linux::_main_thread = pthread_self();
! Bsd::_main_thread = pthread_self();
! Linux::clock_init();
! Bsd::clock_init();
initial_time_count = os::elapsed_counter();
pthread_mutex_init(&dl_mutex, NULL);
+
+ #ifdef __APPLE__
+ // XXXDARWIN
+ // Work around the unaligned VM callbacks in hotspot's
+ // sharedRuntime. The callbacks don't use SSE2 instructions, and work on
+ // Linux, Solaris, and FreeBSD. On Mac OS X, dyld (rightly so) enforces
+ // alignment when doing symbol lookup. To work around this, we force early
+ // binding of all symbols now, thus binding when alignment is known-good.
+ _dyld_bind_fully_image_containing_address((const void *) &os::init);
+ #endif
}
// To install functions for atexit system call
extern "C" {
static void perfMemory_exit_helper() {
*** 4175,4188 ****
--- 4374,4389 ----
}
// this is called _after_ the global arguments have been parsed
jint os::init_2(void)
{
Linux::fast_thread_clock_init();
+ #ifndef _ALLBSD_SOURCE
+ Bsd::fast_thread_clock_init();
+ #endif
// Allocate a single page and mark it as readable for safepoint polling
! address polling_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
! address polling_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee( polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page" );
os::set_polling_page( polling_page );
#ifndef PRODUCT
*** 4189,4199 ****
--- 4390,4400 ----
if(Verbose && PrintMiscellaneous)
tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif
if (!UseMembar) {
! address mem_serialize_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
! address mem_serialize_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
os::set_memory_serialize_page( mem_serialize_page );
#ifndef PRODUCT
if(Verbose && PrintMiscellaneous)
*** 4207,4256 ****
--- 4408,4458 ----
if (SR_initialize() != 0) {
perror("SR_initialize failed");
return JNI_ERR;
}
! Linux::signal_sets_init();
! Linux::install_signal_handlers();
! Bsd::signal_sets_init();
! Bsd::install_signal_handlers();
// Check minimum allowable stack size for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page
// size. Add a page for compiler2 recursion in main thread.
// Add in 2*BytesPerWord times page size to account for VM stack during
// class initialization depending on 32 or 64 bit VM.
! os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
! os::Bsd::min_stack_allowed = MAX2(os::Bsd::min_stack_allowed,
(size_t)(StackYellowPages+StackRedPages+StackShadowPages+
! 2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::page_size());
! 2*BytesPerWord COMPILER2_PRESENT(+1)) * Bsd::page_size());
size_t threadStackSizeInBytes = ThreadStackSize * K;
if (threadStackSizeInBytes != 0 &&
! threadStackSizeInBytes < os::Linux::min_stack_allowed) {
! threadStackSizeInBytes < os::Bsd::min_stack_allowed) {
tty->print_cr("\nThe stack size specified is too small, "
"Specify at least %dk",
! os::Linux::min_stack_allowed/ K);
! os::Bsd::min_stack_allowed/ K);
return JNI_ERR;
}
// Make the stack size a multiple of the page size so that
// the yellow/red zones can be guarded.
JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
vm_page_size()));
Linux::capture_initial_stack(JavaThread::stack_size_at_create());
+ #ifndef _ALLBSD_SOURCE
+ Bsd::capture_initial_stack(JavaThread::stack_size_at_create());
! Linux::libpthread_init();
! Bsd::libpthread_init();
if (PrintMiscellaneous && (Verbose || WizardMode)) {
tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
! Linux::glibc_version(), Linux::libpthread_version(),
! Linux::is_floating_stack() ? "floating stack" : "fixed stack");
! Bsd::glibc_version(), Bsd::libpthread_version(),
! Bsd::is_floating_stack() ? "floating stack" : "fixed stack");
}
if (UseNUMA) {
! if (!Linux::libnuma_init()) {
! if (!Bsd::libnuma_init()) {
UseNUMA = false;
} else {
! if ((Linux::numa_max_node() < 1)) {
! if ((Bsd::numa_max_node() < 1)) {
// There's only one node(they start from 0), disable NUMA.
UseNUMA = false;
}
}
// With SHM large pages we cannot uncommit a page, so there's not way
*** 4272,4281 ****
--- 4474,4484 ----
}
if (!UseNUMA && ForceNUMA) {
UseNUMA = true;
}
}
+ #endif
if (MaxFDLimit) {
// set the number of file descriptors to max. print out error
// if getrlimit/setrlimit fails but continue regardless.
struct rlimit nbr_files;
*** 4283,4302 ****
--- 4486,4515 ----
if (status != 0) {
if (PrintMiscellaneous && (Verbose || WizardMode))
perror("os::init_2 getrlimit failed");
} else {
nbr_files.rlim_cur = nbr_files.rlim_max;
+
+ #ifdef __APPLE__
+ // Darwin returns RLIM_INFINITY for rlim_max, but fails with EINVAL if
+ // you attempt to use RLIM_INFINITY. As per setrlimit(2), OPEN_MAX must
+ // be used instead
+ nbr_files.rlim_cur = MIN(OPEN_MAX, nbr_files.rlim_cur);
+ #endif
+
status = setrlimit(RLIMIT_NOFILE, &nbr_files);
if (status != 0) {
if (PrintMiscellaneous && (Verbose || WizardMode))
perror("os::init_2 setrlimit failed");
}
}
}
+ #ifndef _ALLBSD_SOURCE
// Initialize lock used to serialize thread creation (see os::create_thread)
! Linux::set_createThread_lock(new Mutex(Mutex::leaf, "createThread_lock", false));
! Bsd::set_createThread_lock(new Mutex(Mutex::leaf, "createThread_lock", false));
+ #endif
// at-exit methods are called in the reverse order of their registration.
// atexit functions are called on return from main or as a result of a
// call to exit(3C). There can be only 32 of these functions registered
// and atexit() does not set errno.
*** 4320,4359 ****
--- 4533,4567 ----
return JNI_OK;
}
// this is called at the end of vm_initialization
! void os::init_3(void) { }
{
#ifdef JAVASE_EMBEDDED
// Start the MemNotifyThread
if (LowMemoryProtection) {
MemNotifyThread::start();
}
return;
#endif
}
// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
! if( !guard_memory((char*)_polling_page, Linux::page_size()) )
! if( !guard_memory((char*)_polling_page, Bsd::page_size()) )
fatal("Could not disable polling page");
};
// Mark the polling page as readable
void os::make_polling_page_readable(void) {
! if( !linux_mprotect((char *)_polling_page, Linux::page_size(), PROT_READ)) {
! if( !bsd_mprotect((char *)_polling_page, Bsd::page_size(), PROT_READ)) {
fatal("Could not enable polling page");
}
};
// Number of processors currently available to the VM.
int os::active_processor_count() {
#ifdef _ALLBSD_SOURCE
  // On BSD, report the count captured by initialize_system_info().
  return _processor_count;
#else
  // Bsd doesn't yet have a (official) notion of processor sets,
  // so just return the number of online processors.
  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
  return online_cpus;
#endif
}
bool os::distribute_processes(uint length, uint* distribution) {
// Not yet implemented.
return false;
*** 4376,4386 ****
--- 4584,4594 ----
ExtendedPC epc;
OSThread* osthread = thread->osthread();
if (do_suspend(osthread)) {
if (osthread->ucontext() != NULL) {
! epc = os::Linux::ucontext_get_pc(osthread->ucontext());
! epc = os::Bsd::ucontext_get_pc(osthread->ucontext());
} else {
// NULL context is unexpected, double-check this is the VMThread
guarantee(thread->is_VM_thread(), "can only be called for VMThread");
}
do_resume(osthread);
*** 4389,4405 ****
--- 4597,4616 ----
// a fatal problem, but such problems are ignored elsewhere
return epc;
}
! int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
! int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
{
+ #ifdef _ALLBSD_SOURCE
+ return pthread_cond_timedwait(_cond, _mutex, _abstime);
+ #else
if (is_NPTL()) {
return pthread_cond_timedwait(_cond, _mutex, _abstime);
} else {
#ifndef IA64
! // 6292965: LinuxThreads pthread_cond_timedwait() resets FPU control
! // 6292965: BsdThreads pthread_cond_timedwait() resets FPU control
// word back to default 64bit precision if condvar is signaled. Java
// wants 53bit precision. Save and restore current value.
int fpu = get_fpu_control_word();
#endif // IA64
int status = pthread_cond_timedwait(_cond, _mutex, _abstime);
*** 4406,4415 ****
--- 4617,4627 ----
#ifndef IA64
set_fpu_control_word(fpu);
#endif // IA64
return status;
}
+ #endif
}
////////////////////////////////////////////////////////////////////////////////
// debug support
*** 4463,4473 ****
--- 4675,4685 ----
}
////////////////////////////////////////////////////////////////////////////////
// misc
! // This does not do anything on Linux. This is basically a hook for being
! // This does not do anything on Bsd. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters)
// on, e.g., Win32.
void
os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
JavaCallArguments* args, Thread* thread) {
*** 4553,4570 ****
--- 4765,4782 ----
}
int fd;
int o_delete = (oflag & O_DELETE);
oflag = oflag & ~O_DELETE;
- fd = ::open64(path, oflag, mode);
if (fd == -1) return -1;
//If the open succeeded, the file might still be a directory
{
! struct stat64 buf64;
! int ret = ::fstat64(fd, &buf64);
- int st_mode = buf64.st_mode;
! int ret = ::fstat(fd, &buf);
! int st_mode = buf.st_mode;
if (ret != -1) {
if ((st_mode & S_IFMT) == S_IFDIR) {
errno = EISDIR;
::close(fd);
*** 4617,4649 ****
--- 4829,4861 ----
int os::create_binary_file(const char* path, bool rewrite_existing) {
int oflags = O_WRONLY | O_CREAT;
if (!rewrite_existing) {
oflags |= O_EXCL;
}
- return ::open64(path, oflags, S_IREAD | S_IWRITE);
}
// return current position of file pointer
jlong os::current_file_offset(int fd) {
! return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
! return (jlong)::lseek(fd, (off_t)0, SEEK_CUR);
}
// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
! return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
! return (jlong)::lseek(fd, (off_t)offset, SEEK_SET);
}
// This code originates from JDK's sysAvailable
// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
int os::available(int fd, jlong *bytes) {
jlong cur, end;
int mode;
! struct stat64 buf64;
! if (::fstat64(fd, &buf64) >= 0) {
- mode = buf64.st_mode;
! mode = buf.st_mode;
if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
/*
* XXX: is the following call interruptible? If so, this might
* need to go through the INTERRUPT_IO() wrapper as for other
* blocking, interruptible calls in this file.
*** 4653,4680 ****
--- 4865,4897 ----
*bytes = n;
return 1;
}
}
}
- if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
return 0;
- } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
return 0;
- } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
return 0;
}
*bytes = end - cur;
return 1;
}
int os::socket_available(int fd, jint *pbytes) {
// Linux doc says EINTR not returned, unlike Solaris
! int ret = ::ioctl(fd, FIONREAD, pbytes);
+ if (fd < 0)
! return OS_OK;
+ int ret;
+
+ RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
+
//%% note ioctl can return 0 when successful, JVM_SocketAvailable
// is expected to return 0 on failure and 1 on success to the jdk.
return (ret < 0) ? 0 : 1;
+
+ return (ret == OS_ERR) ? 0 : 1;
}
// Map a block of memory.
char* os::map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
*** 4720,4782 ****
--- 4937,5033 ----
// Unmap a block of memory.
bool os::unmap_memory(char* addr, size_t bytes) {
return munmap(addr, bytes) == 0;
}
+ #ifndef _ALLBSD_SOURCE
static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time);
static clockid_t thread_cpu_clockid(Thread* thread) {
pthread_t tid = thread->osthread()->pthread_id();
clockid_t clockid;
// Get thread clockid
! int rc = os::Linux::pthread_getcpuclockid(tid, &clockid);
! int rc = os::Bsd::pthread_getcpuclockid(tid, &clockid);
assert(rc == 0, "pthread_getcpuclockid is expected to return 0 code");
return clockid;
}
+ #endif
// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.
jlong os::current_thread_cpu_time() {
if (os::Linux::supports_fast_thread_cpu_time()) {
! return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
+ #ifdef __APPLE__
! return os::thread_cpu_time(Thread::current(), true /* user + sys */);
+ #elif !defined(_ALLBSD_SOURCE)
+ if (os::Bsd::supports_fast_thread_cpu_time()) {
+ return os::Bsd::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
} else {
// return user + sys since the cost is the same
return slow_thread_cpu_time(Thread::current(), true /* user + sys */);
}
+ #endif
}
jlong os::thread_cpu_time(Thread* thread) {
+ #ifndef _ALLBSD_SOURCE
// consistent with what current_thread_cpu_time() returns
! if (os::Linux::supports_fast_thread_cpu_time()) {
! return os::Linux::fast_thread_cpu_time(thread_cpu_clockid(thread));
! if (os::Bsd::supports_fast_thread_cpu_time()) {
! return os::Bsd::fast_thread_cpu_time(thread_cpu_clockid(thread));
} else {
return slow_thread_cpu_time(thread, true /* user + sys */);
}
+ #endif
}
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
! return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
+ #ifdef __APPLE__
! return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
+ #elif !defined(_ALLBSD_SOURCE)
+ if (user_sys_cpu_time && os::Bsd::supports_fast_thread_cpu_time()) {
+ return os::Bsd::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
} else {
return slow_thread_cpu_time(Thread::current(), user_sys_cpu_time);
}
+ #endif
}
jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
! return os::Linux::fast_thread_cpu_time(thread_cpu_clockid(thread));
+ #ifdef __APPLE__
! struct thread_basic_info tinfo;
+ mach_msg_type_number_t tcount = THREAD_INFO_MAX;
+ kern_return_t kr;
+ mach_port_t mach_thread;
+
+ mach_thread = pthread_mach_thread_np(thread->osthread()->thread_id());
+ kr = thread_info(mach_thread, THREAD_BASIC_INFO, (thread_info_t)&tinfo, &tcount);
+ if (kr != KERN_SUCCESS)
+ return -1;
+
+ if (user_sys_cpu_time) {
+ jlong nanos;
+ nanos = ((jlong) tinfo.system_time.seconds + tinfo.user_time.seconds) * (jlong)1000000000;
+ nanos += ((jlong) tinfo.system_time.microseconds + (jlong) tinfo.user_time.microseconds) * (jlong)1000;
+ return nanos;
} else {
+ return ((jlong)tinfo.user_time.seconds * 1000000000) + ((jlong)tinfo.user_time.microseconds * (jlong)1000);
+ }
+ #elif !defined(_ALLBSD_SOURCE)
+ if (user_sys_cpu_time && os::Bsd::supports_fast_thread_cpu_time()) {
+ return os::Bsd::fast_thread_cpu_time(thread_cpu_clockid(thread));
+ } else {
return slow_thread_cpu_time(thread, user_sys_cpu_time);
}
+ #endif
}
+ #ifndef _ALLBSD_SOURCE
//
// -1 on error.
//
static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
*** 4796,4806 ****
--- 5047,5057 ----
int idummy;
long ldummy;
FILE *fp;
// We first try accessing /proc/<pid>/cpu since this is faster to
! // process. If this file is not present (linux kernels 2.5 and above)
! // process. If this file is not present (Linux kernels 2.5 and above)
// then we open /proc/<pid>/stat.
if ( proc_pid_cpu_avail ) {
sprintf(proc_name, "/proc/%d/cpu", tid);
fp = fopen(proc_name, "r");
if ( fp != NULL ) {
*** 4816,4831 ****
--- 5067,5082 ----
}
else proc_pid_cpu_avail = false;
}
// The /proc/<tid>/stat aggregates per-process usage on
! // new Linux kernels 2.6+ where NPTL is supported.
! // new Linux kernels 2.6+ where NPTL is supported.
// The /proc/self/task/<tid>/stat still has the per-thread usage.
// See bug 6328462.
// There can be no directory /proc/self/task on kernels 2.4 with NPTL
// and possibly in some other cases, so we check its availability.
! if (proc_task_unchecked && os::Linux::is_NPTL()) {
! if (proc_task_unchecked && os::Bsd::is_NPTL()) {
// This is executed only once
proc_task_unchecked = false;
fp = fopen("/proc/self/task", "r");
if (fp != NULL) {
proc_stat_path = "/proc/self/task/%d/stat";
*** 4862,4871 ****
--- 5113,5123 ----
return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
} else {
return (jlong)user_time * (1000000000 / clock_tics_per_sec);
}
}
+ #endif
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits
info_ptr->may_skip_backward = false; // elapsed time not wall time
info_ptr->may_skip_forward = false; // elapsed time not wall time
*** 4878,4892 ****
--- 5130,5150 ----
info_ptr->may_skip_forward = false; // elapsed time not wall time
info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
}
bool os::is_thread_cpu_time_supported() {
+ #ifdef __APPLE__
return true;
+ #elif defined(_ALLBSD_SOURCE)
+ return false;
+ #else
+ return true;
+ #endif
}
// System loadavg support. Returns -1 if load average cannot be obtained.
! // Linux doesn't yet have a (official) notion of processor sets,
! // Bsd doesn't yet have an (official) notion of processor sets,
// so just return the system wide load average.
int os::loadavg(double loadavg[], int nelem) {
return ::getloadavg(loadavg, nelem);
}
*** 4973,4983 ****
--- 5231,5241 ----
// utility to compute the abstime argument to timedwait:
// millis is the relative timeout time
// abstime will be the absolute timeout time
// TODO: replace compute_abstime() with unpackTime()
! static struct timespec* compute_abstime(struct timespec* abstime, jlong millis) {
if (millis < 0) millis = 0;
struct timeval now;
int status = gettimeofday(&now, NULL);
assert(status == 0, "gettimeofday");
jlong seconds = millis / 1000;
*** 5025,5035 ****
--- 5283,5293 ----
++ _nParked ;
while (_Event < 0) {
status = pthread_cond_wait(_cond, _mutex);
// for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
// Treat this the same as if the wait was interrupted
! if (status == ETIMEDOUT) { status = EINTR; }
assert_status(status == 0 || status == EINTR, status, "cond_wait");
}
-- _nParked ;
// In theory we could move the ST of 0 into _Event past the unlock(),
*** 5077,5096 ****
--- 5335,5354 ----
//
// TODO: properly differentiate simultaneous notify+interrupt.
// In that case, we should propagate the notify to another waiter.
while (_Event < 0) {
! status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
! status = os::Bsd::safe_cond_timedwait(_cond, _mutex, &abst);
if (status != 0 && WorkAroundNPTLTimedWaitHang) {
pthread_cond_destroy (_cond);
pthread_cond_init (_cond, NULL) ;
}
assert_status(status == 0 || status == EINTR ||
- status == ETIME || status == ETIMEDOUT,
status, "cond_timedwait");
if (!FilterSpuriousWakeups) break ; // previous semantics
- if (status == ETIME || status == ETIMEDOUT) break ;
// We consume and ignore EINTR and spurious wakeups.
}
--_nParked ;
if (_Event >= 0) {
ret = OS_OK;
*** 5145,5155 ****
--- 5403,5413 ----
// JSR166
// -------------------------------------------------------
/*
! * The solaris and linux implementations of park/unpark are fairly
! * The solaris and bsd implementations of park/unpark are fairly
* conservative for now, but can be improved. They currently use a
* mutex/condvar pair, plus a a count.
* Park decrements count if > 0, else does a condvar wait. Unpark
* sets count to 1 and signals condvar. Only one thread ever waits
* on the condvar. Contention seen when trying to park implies that someone
*** 5160,5188 ****
--- 5418,5446 ----
#define NANOSECS_PER_SEC 1000000000
#define NANOSECS_PER_MILLISEC 1000000
#define MAX_SECS 100000000
/*
! * This code is common to linux and solaris and will be moved to a
! * This code is common to bsd and solaris and will be moved to a
* common place in dolphin.
*
* The passed in time value is either a relative time in nanoseconds
* or an absolute time in milliseconds. Either way it has to be unpacked
* into suitable seconds and nanoseconds components and stored in the
* given timespec structure.
* Given time is a 64-bit value and the time_t used in the timespec is only
! * a signed-32-bit value (except on 64-bit Linux) we have to watch for
! * a signed-32-bit value (except on 64-bit Bsd) we have to watch for
* overflow if times way in the future are given. Further on Solaris versions
* prior to 10 there is a restriction (see cond_timedwait) that the specified
* number of seconds, in abstime, is less than current_time + 100,000,000.
* As it will be 28 years before "now + 100000000" will overflow we can
* ignore overflow and just impose a hard-limit on seconds using the value
* of "now + 100,000,000". This places a limit on the timeout of about 3.17
* years from "now".
*/
! static void unpackTime(struct timespec* absTime, bool isAbsolute, jlong time) {
assert (time > 0, "convertTime");
struct timeval now;
int status = gettimeofday(&now, NULL);
assert(status == 0, "gettimeofday");
*** 5238,5248 ****
--- 5496,5506 ----
if (Thread::is_interrupted(thread, false)) {
return;
}
// Next, demultiplex/decode time arguments
! struct timespec absTime;
if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
return;
}
if (time > 0) {
unpackTime(&absTime, isAbsolute, time);
*** 5274,5284 ****
--- 5532,5542 ----
#ifdef ASSERT
// Don't catch signals while blocked; let the running threads have the signals.
// (This allows a debugger to break into the running thread.)
sigset_t oldsigs;
! sigset_t* allowdebug_blocked = os::Linux::allowdebug_blocked_signals();
! sigset_t* allowdebug_blocked = os::Bsd::allowdebug_blocked_signals();
pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif
OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
jt->set_suspend_equivalent();
*** 5285,5302 ****
--- 5543,5560 ----
// cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
if (time == 0) {
status = pthread_cond_wait (_cond, _mutex) ;
} else {
! status = os::Linux::safe_cond_timedwait (_cond, _mutex, &absTime) ;
! status = os::Bsd::safe_cond_timedwait (_cond, _mutex, &absTime) ;
if (status != 0 && WorkAroundNPTLTimedWaitHang) {
pthread_cond_destroy (_cond) ;
pthread_cond_init (_cond, NULL);
}
}
assert_status(status == 0 || status == EINTR ||
- status == ETIME || status == ETIMEDOUT,
status, "cond_timedwait");
#ifdef ASSERT
pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif
*** 5335,5385 ****
--- 5593,5639 ----
assert (status == 0, "invariant") ;
}
}
+ /* Darwin has no "environ" in a dynamic library. */
+ #ifdef __APPLE__
+ #include <crt_externs.h>
+ #define environ (*_NSGetEnviron())
+ #else
extern char** environ;
#ifndef __NR_fork
#define __NR_fork IA32_ONLY(2) IA64_ONLY(not defined) AMD64_ONLY(57)
#endif
#ifndef __NR_execve
#define __NR_execve IA32_ONLY(11) IA64_ONLY(1033) AMD64_ONLY(59)
#endif
// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
const char * argv[4] = {"sh", "-c", cmd, NULL};
! // fork() in LinuxThreads/NPTL is not async-safe. It needs to run
! // fork() in BsdThreads/NPTL is not async-safe. It needs to run
// pthread_atfork handlers and reset pthread library. All we need is a
// separate process to execve. Make a direct syscall to fork process.
// On IA64 there's no fork syscall, we have to use fork() and hope for
// the best...
! pid_t pid = NOT_IA64(syscall(__NR_fork);)
IA64_ONLY(fork();)
! pid_t pid = fork();
if (pid < 0) {
// fork failed
return -1;
} else if (pid == 0) {
// child process
! // execve() in LinuxThreads will call pthread_kill_other_threads_np()
! // execve() in BsdThreads will call pthread_kill_other_threads_np()
// first to kill every thread on the thread list. Because this list is
// not reset by fork() (see notes above), execve() will instead kill
// every thread in the parent process. We know this is the only thread
// in the new process, so make a system call directly.
// IA64 should use normal execve() from glibc to match the glibc fork()
// above.
NOT_IA64(syscall(__NR_execve, "/bin/sh", argv, environ);)
IA64_ONLY(execve("/bin/sh", (char* const*)argv, environ);)
+ execve("/bin/sh", (char* const*)argv, environ);
// execve failed
_exit(-1);
} else {
*** 5451,5531 ****
--- 5705,5709 ----
strcat(libmawtpath, motifstr);
if (::stat(libmawtpath, &statbuf) == 0) return false;
return true;
}
#ifdef JAVASE_EMBEDDED
//
// A thread to watch the '/dev/mem_notify' device, which will tell us when the OS is running low on memory.
//
MemNotifyThread* MemNotifyThread::_memnotify_thread = NULL;
// ctor
//
MemNotifyThread::MemNotifyThread(int fd): Thread() {
assert(memnotify_thread() == NULL, "we can only allocate one MemNotifyThread");
_fd = fd;
if (os::create_thread(this, os::os_thread)) {
_memnotify_thread = this;
os::set_priority(this, NearMaxPriority);
os::start_thread(this);
}
}
// Where all the work gets done
//
void MemNotifyThread::run() {
assert(this == memnotify_thread(), "expected the singleton MemNotifyThread");
// Set up the select arguments
fd_set rfds;
if (_fd != -1) {
FD_ZERO(&rfds);
FD_SET(_fd, &rfds);
}
// Now wait for the mem_notify device to wake up
while (1) {
// Wait for the mem_notify device to signal us..
int rc = select(_fd+1, _fd != -1 ? &rfds : NULL, NULL, NULL, NULL);
if (rc == -1) {
perror("select!\n");
break;
} else if (rc) {
//ssize_t free_before = os::available_memory();
//tty->print ("Notified: Free: %dK \n",os::available_memory()/1024);
// The kernel is telling us there is not much memory left...
// try to do something about that
// If we are not already in a GC, try one.
if (!Universe::heap()->is_gc_active()) {
Universe::heap()->collect(GCCause::_allocation_failure);
//ssize_t free_after = os::available_memory();
//tty->print ("Post-Notify: Free: %dK\n",free_after/1024);
//tty->print ("GC freed: %dK\n", (free_after - free_before)/1024);
}
// We might want to do something like the following if we find the GC's are not helping...
// Universe::heap()->size_policy()->set_gc_time_limit_exceeded(true);
}
}
}
//
// See if the /dev/mem_notify device exists, and if so, start a thread to monitor it.
//
void MemNotifyThread::start() {
int fd;
fd = open ("/dev/mem_notify", O_RDONLY, 0);
if (fd < 0) {
return;
}
if (memnotify_thread() == NULL) {
new MemNotifyThread(fd);
}
}
#endif // JAVASE_EMBEDDED
src/os/bsd/vm/os_bsd.cpp
Index
Unified diffs
Context diffs
Sdiffs
Patch
New
Old
Previous File
Next File