< prev index next >

src/hotspot/os/bsd/os_bsd.cpp

Print this page




 913   struct timespec tp;
 914   if (::clock_getres(CLOCK_MONOTONIC, &res) == 0 &&
 915       ::clock_gettime(CLOCK_MONOTONIC, &tp)  == 0) {
 916     // yes, monotonic clock is supported
 917     _clock_gettime = ::clock_gettime;
 918   }
 919 }
 920 #endif
 921 
 922 
 923 
 924 #ifdef __APPLE__
 925 
// Monotonic nanosecond clock for Darwin, built on mach_absolute_time().
// A shared high-water mark (Bsd::_max_abstime) guarantees that values
// returned to Java never go backwards, even across threads.
jlong os::javaTimeNanos() {
  // Raw tick count, scaled to nanoseconds with the numer/denom ratio
  // cached in Bsd::_timebase_info at VM startup.
  const uint64_t tm = mach_absolute_time();
  const uint64_t now = (tm * Bsd::_timebase_info.numer) / Bsd::_timebase_info.denom;
  // Highest value any thread has returned so far.
  const uint64_t prev = Bsd::_max_abstime;
  if (now <= prev) {
    return prev;   // same or retrograde time;
  }
  // Try to publish "now" as the new maximum. This (old-style) cmpxchg takes
  // (exchange_value, dest, compare_value) and returns the value observed at
  // dest: equal to "prev" on success, something newer on failure.
  const uint64_t obsv = Atomic::cmpxchg(now, &Bsd::_max_abstime, prev);
  assert(obsv >= prev, "invariant");   // Monotonicity
  // If the CAS succeeded then we're done and return "now".
  // If the CAS failed and the observed value "obsv" is >= now then
  // we should return "obsv".  If the CAS failed and now > obsv > prv then
  // some other thread raced this thread and installed a new value, in which case
  // we could either (a) retry the entire operation, (b) retry trying to install now
  // or (c) just return obsv.  We use (c).   No loop is required although in some cases
  // we might discard a higher "now" value in deference to a slightly lower but freshly
  // installed obsv value.   That's entirely benign -- it admits no new orderings compared
  // to (a) or (b) -- and greatly reduces coherence traffic.
  // We might also condition (c) on the magnitude of the delta between obsv and now.
  // Avoiding excessive CAS operations to hot RW locations is critical.
  // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
  return (prev == obsv) ? now : obsv;
}
 949 
 950 #else // __APPLE__
 951 
 952 jlong os::javaTimeNanos() {
 953   if (os::supports_monotonic_clock()) {


1816 
1817   // Initialize signal semaphore
1818   sig_sem = new Semaphore();
1819 }
1820 
1821 void os::signal_notify(int sig) {
1822   if (sig_sem != NULL) {
1823     Atomic::inc(&pending_signals[sig]);
1824     sig_sem->signal();
1825   } else {
1826     // Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
1827     // initialization isn't called.
1828     assert(ReduceSignalUsage, "signal semaphore should be created");
1829   }
1830 }
1831 
1832 static int check_pending_signals() {
1833   for (;;) {
1834     for (int i = 0; i < NSIG + 1; i++) {
1835       jint n = pending_signals[i];
1836       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1837         return i;
1838       }
1839     }
1840     JavaThread *thread = JavaThread::current();
1841     ThreadBlockInVM tbivm(thread);
1842 
1843     bool threadIsSuspended;
1844     do {
1845       thread->set_suspend_equivalent();
1846       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1847       sig_sem->wait();
1848 
1849       // were we externally suspended while we were waiting?
1850       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1851       if (threadIsSuspended) {
1852         // The semaphore has been incremented, but while we were waiting
1853         // another thread suspended us. We don't want to continue running
1854         // while suspended because that would surprise the thread that
1855         // suspended us.
1856         sig_sem->signal();


1877 // Solaris allocates memory by pages.
1878 int os::vm_allocation_granularity() {
1879   assert(os::Bsd::page_size() != -1, "must call os::init");
1880   return os::Bsd::page_size();
1881 }
1882 
1883 // Rationale behind this function:
1884 //  current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable
1885 //  mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get
1886 //  samples for JITted code. Here we create private executable mapping over the code cache
1887 //  and then we can use standard (well, almost, as mapping can change) way to provide
1888 //  info for the reporting script by storing timestamp and location of symbol
// See the rationale above: place a private executable file mapping over the
// code cache so profiling tools can attribute samples to JITted code.
void bsd_wrap_code(char* base, size_t size) {
  // Sequence number distinguishing successive mappings from this process.
  static volatile jint cnt = 0;

  if (!UseOprofile) {
    return;
  }

  char buf[PATH_MAX + 1];
  int num = Atomic::add(1, &cnt);   // old-style Atomic API: add(add_value, dest)

  // File name encodes temp dir, pid and sequence number so a reporting
  // script can locate the mapping that was current at a given time.
  snprintf(buf, PATH_MAX + 1, "%s/hs-vm-%d-%d",
           os::get_temp_directory(), os::current_process_id(), num);
  unlink(buf);

  int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);

  if (fd != -1) {
    // Extend the file by seeking near the end and writing a single byte so
    // the range can be mapped.
    // NOTE(review): lseek(size-2) + one written byte yields a file of
    // size-1 bytes, one short of the mapped length; presumably benign
    // because mappings are page-granular, but worth confirming.
    off_t rv = ::lseek(fd, size-2, SEEK_SET);
    if (rv != (off_t)-1) {
      if (::write(fd, "", 1) == 1) {
        // Best effort: the mmap result is deliberately ignored; on failure
        // the code cache simply keeps its existing (anonymous) mapping.
        mmap(base, size,
             PROT_READ|PROT_WRITE|PROT_EXEC,
             MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
      }
    }
    ::close(fd);
    // Unlink immediately; an established mapping keeps the data reachable.
    unlink(buf);
  }
}


3220       uint edx;
3221 
3222       __asm__ ("cpuid\n\t" : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx) : );
3223 
3224       uint level_type = (ecx >> 8) & 0xFF;
3225       if (level_type == 0) {
3226         // Invalid level; end of topology
3227         break;
3228       }
3229       uint level_apic_id_shift = eax & ((1u << 5) - 1);
3230       total_bits += level_apic_id_shift;
3231     }
3232 
3233     uint max_apic_ids = 1u << total_bits;
3234     mapping = NEW_C_HEAP_ARRAY(int, max_apic_ids, mtInternal);
3235 
3236     for (uint i = 0; i < max_apic_ids; ++i) {
3237       mapping[i] = -1;
3238     }
3239 
3240     if (!Atomic::replace_if_null(mapping, &apic_to_processor_mapping)) {
3241       FREE_C_HEAP_ARRAY(int, mapping);
3242       mapping = Atomic::load_acquire(&apic_to_processor_mapping);
3243     }
3244   }
3245 
3246   return mapping;
3247 }
3248 
3249 uint os::processor_id() {
3250   volatile int* mapping = get_apic_to_processor_mapping();
3251 
3252   uint eax = 0xb;
3253   uint ebx;
3254   uint ecx = 0;
3255   uint edx;
3256 
3257   __asm__ ("cpuid\n\t" : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx) : );
3258 
3259   // Map from APIC id to a unique logical processor ID in the expected
3260   // [0, num_processors) range.
3261 
3262   uint apic_id = edx;
3263   int processor_id = Atomic::load(&mapping[apic_id]);
3264 
3265   while (processor_id < 0) {
3266     if (Atomic::cmpxchg(-2, &mapping[apic_id], -1)) {
3267       Atomic::store(Atomic::add(1, &next_processor_id) - 1, &mapping[apic_id]);
3268     }
3269     processor_id = Atomic::load(&mapping[apic_id]);
3270   }
3271 
3272   return (uint)processor_id;
3273 }
3274 #endif
3275 
3276 void os::set_native_thread_name(const char *name) {
3277 #if defined(__APPLE__) && MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_5
3278   // This is only supported in Snow Leopard and beyond
3279   if (name != NULL) {
3280     // Add a "Java: " prefix to the name
3281     char buf[MAXTHREADNAMESIZE];
3282     snprintf(buf, sizeof(buf), "Java: %s", name);
3283     pthread_setname_np(buf);
3284   }
3285 #endif
3286 }
3287 




 913   struct timespec tp;
 914   if (::clock_getres(CLOCK_MONOTONIC, &res) == 0 &&
 915       ::clock_gettime(CLOCK_MONOTONIC, &tp)  == 0) {
 916     // yes, monotonic clock is supported
 917     _clock_gettime = ::clock_gettime;
 918   }
 919 }
 920 #endif
 921 
 922 
 923 
 924 #ifdef __APPLE__
 925 
// Monotonic nanosecond clock for Darwin, built on mach_absolute_time().
// A shared high-water mark (Bsd::_max_abstime) guarantees that values
// returned to Java never go backwards, even across threads.
jlong os::javaTimeNanos() {
  // Raw tick count, scaled to nanoseconds with the numer/denom ratio
  // cached in Bsd::_timebase_info at VM startup.
  const uint64_t tm = mach_absolute_time();
  const uint64_t now = (tm * Bsd::_timebase_info.numer) / Bsd::_timebase_info.denom;
  // Highest value any thread has returned so far.
  const uint64_t prev = Bsd::_max_abstime;
  if (now <= prev) {
    return prev;   // same or retrograde time;
  }
  // Try to publish "now" as the new maximum. cmpxchg(dest, compare_value,
  // exchange_value) returns the value observed at dest: equal to "prev" on
  // success, something newer on failure.
  const uint64_t obsv = Atomic::cmpxchg(&Bsd::_max_abstime, prev, now);
  assert(obsv >= prev, "invariant");   // Monotonicity
  // If the CAS succeeded then we're done and return "now".
  // If the CAS failed and the observed value "obsv" is >= now then
  // we should return "obsv".  If the CAS failed and now > obsv > prv then
  // some other thread raced this thread and installed a new value, in which case
  // we could either (a) retry the entire operation, (b) retry trying to install now
  // or (c) just return obsv.  We use (c).   No loop is required although in some cases
  // we might discard a higher "now" value in deference to a slightly lower but freshly
  // installed obsv value.   That's entirely benign -- it admits no new orderings compared
  // to (a) or (b) -- and greatly reduces coherence traffic.
  // We might also condition (c) on the magnitude of the delta between obsv and now.
  // Avoiding excessive CAS operations to hot RW locations is critical.
  // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
  return (prev == obsv) ? now : obsv;
}
 949 
 950 #else // __APPLE__
 951 
 952 jlong os::javaTimeNanos() {
 953   if (os::supports_monotonic_clock()) {


1816 
1817   // Initialize signal semaphore
1818   sig_sem = new Semaphore();
1819 }
1820 
1821 void os::signal_notify(int sig) {
1822   if (sig_sem != NULL) {
1823     Atomic::inc(&pending_signals[sig]);
1824     sig_sem->signal();
1825   } else {
1826     // Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
1827     // initialization isn't called.
1828     assert(ReduceSignalUsage, "signal semaphore should be created");
1829   }
1830 }
1831 
1832 static int check_pending_signals() {
1833   for (;;) {
1834     for (int i = 0; i < NSIG + 1; i++) {
1835       jint n = pending_signals[i];
1836       if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
1837         return i;
1838       }
1839     }
1840     JavaThread *thread = JavaThread::current();
1841     ThreadBlockInVM tbivm(thread);
1842 
1843     bool threadIsSuspended;
1844     do {
1845       thread->set_suspend_equivalent();
1846       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1847       sig_sem->wait();
1848 
1849       // were we externally suspended while we were waiting?
1850       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1851       if (threadIsSuspended) {
1852         // The semaphore has been incremented, but while we were waiting
1853         // another thread suspended us. We don't want to continue running
1854         // while suspended because that would surprise the thread that
1855         // suspended us.
1856         sig_sem->signal();


1877 // Solaris allocates memory by pages.
1878 int os::vm_allocation_granularity() {
1879   assert(os::Bsd::page_size() != -1, "must call os::init");
1880   return os::Bsd::page_size();
1881 }
1882 
1883 // Rationale behind this function:
1884 //  current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable
1885 //  mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get
1886 //  samples for JITted code. Here we create private executable mapping over the code cache
1887 //  and then we can use standard (well, almost, as mapping can change) way to provide
1888 //  info for the reporting script by storing timestamp and location of symbol
// See the rationale above: place a private executable file mapping over the
// code cache so profiling tools can attribute samples to JITted code.
void bsd_wrap_code(char* base, size_t size) {
  // Sequence number distinguishing successive mappings from this process.
  static volatile jint cnt = 0;

  if (!UseOprofile) {
    return;
  }

  char buf[PATH_MAX + 1];
  int num = Atomic::add(&cnt, 1);   // new-style Atomic API: add(dest, add_value)

  // File name encodes temp dir, pid and sequence number so a reporting
  // script can locate the mapping that was current at a given time.
  snprintf(buf, PATH_MAX + 1, "%s/hs-vm-%d-%d",
           os::get_temp_directory(), os::current_process_id(), num);
  unlink(buf);

  int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);

  if (fd != -1) {
    // Extend the file by seeking near the end and writing a single byte so
    // the range can be mapped.
    // NOTE(review): lseek(size-2) + one written byte yields a file of
    // size-1 bytes, one short of the mapped length; presumably benign
    // because mappings are page-granular, but worth confirming.
    off_t rv = ::lseek(fd, size-2, SEEK_SET);
    if (rv != (off_t)-1) {
      if (::write(fd, "", 1) == 1) {
        // Best effort: the mmap result is deliberately ignored; on failure
        // the code cache simply keeps its existing (anonymous) mapping.
        mmap(base, size,
             PROT_READ|PROT_WRITE|PROT_EXEC,
             MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
      }
    }
    ::close(fd);
    // Unlink immediately; an established mapping keeps the data reachable.
    unlink(buf);
  }
}


3220       uint edx;
3221 
3222       __asm__ ("cpuid\n\t" : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx) : );
3223 
3224       uint level_type = (ecx >> 8) & 0xFF;
3225       if (level_type == 0) {
3226         // Invalid level; end of topology
3227         break;
3228       }
3229       uint level_apic_id_shift = eax & ((1u << 5) - 1);
3230       total_bits += level_apic_id_shift;
3231     }
3232 
3233     uint max_apic_ids = 1u << total_bits;
3234     mapping = NEW_C_HEAP_ARRAY(int, max_apic_ids, mtInternal);
3235 
3236     for (uint i = 0; i < max_apic_ids; ++i) {
3237       mapping[i] = -1;
3238     }
3239 
3240     if (!Atomic::replace_if_null(&apic_to_processor_mapping, mapping)) {
3241       FREE_C_HEAP_ARRAY(int, mapping);
3242       mapping = Atomic::load_acquire(&apic_to_processor_mapping);
3243     }
3244   }
3245 
3246   return mapping;
3247 }
3248 
3249 uint os::processor_id() {
3250   volatile int* mapping = get_apic_to_processor_mapping();
3251 
3252   uint eax = 0xb;
3253   uint ebx;
3254   uint ecx = 0;
3255   uint edx;
3256 
3257   __asm__ ("cpuid\n\t" : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx) : );
3258 
3259   // Map from APIC id to a unique logical processor ID in the expected
3260   // [0, num_processors) range.
3261 
3262   uint apic_id = edx;
3263   int processor_id = Atomic::load(&mapping[apic_id]);
3264 
3265   while (processor_id < 0) {
3266     if (Atomic::cmpxchg(&mapping[apic_id], -1, -2)) {
3267       Atomic::store(&mapping[apic_id], Atomic::add(&next_processor_id, 1) - 1);
3268     }
3269     processor_id = Atomic::load(&mapping[apic_id]);
3270   }
3271 
3272   return (uint)processor_id;
3273 }
3274 #endif
3275 
3276 void os::set_native_thread_name(const char *name) {
3277 #if defined(__APPLE__) && MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_5
3278   // This is only supported in Snow Leopard and beyond
3279   if (name != NULL) {
3280     // Add a "Java: " prefix to the name
3281     char buf[MAXTHREADNAMESIZE];
3282     snprintf(buf, sizeof(buf), "Java: %s", name);
3283     pthread_setname_np(buf);
3284   }
3285 #endif
3286 }
3287 


< prev index next >