913 struct timespec tp;
914 if (::clock_getres(CLOCK_MONOTONIC, &res) == 0 &&
915 ::clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
916 // yes, monotonic clock is supported
917 _clock_gettime = ::clock_gettime;
918 }
919 }
920 #endif
921
922
923
924 #ifdef __APPLE__
925
// macOS monotonic clock: javaTimeNanos() is built on mach_absolute_time().
// The raw tick count is scaled to nanoseconds using the cached timebase
// (numer/denom) and then clamped against a process-global high-water mark
// (Bsd::_max_abstime) so the returned value never moves backwards, even
// when observed across threads.
926 jlong os::javaTimeNanos() {
927 const uint64_t tm = mach_absolute_time();
// Scale ticks -> nanoseconds with the timebase cached in Bsd::_timebase_info.
928 const uint64_t now = (tm * Bsd::_timebase_info.numer) / Bsd::_timebase_info.denom;
// Racy (plain) read of the published maximum; the CAS below revalidates it.
929 const uint64_t prev = Bsd::_max_abstime;
930 if (now <= prev) {
931 return prev; // same or retrograde time;
932 }
// Old-style Atomic::cmpxchg(new, dest, compare): returns the value observed
// at dest — "prev" on success, a newer value installed by a racing thread
// on failure.
933 const uint64_t obsv = Atomic::cmpxchg(now, &Bsd::_max_abstime, prev);
934 assert(obsv >= prev, "invariant"); // Monotonicity
935 // If the CAS succeeded then we're done and return "now".
936 // If the CAS failed and the observed value "obsv" is >= now then
937 // we should return "obsv". If the CAS failed and now > obsv > prev then
938 // some other thread raced this thread and installed a new value, in which case
939 // we could either (a) retry the entire operation, (b) retry trying to install now
940 // or (c) just return obsv. We use (c). No loop is required although in some cases
941 // we might discard a higher "now" value in deference to a slightly lower but freshly
942 // installed obsv value. That's entirely benign -- it admits no new orderings compared
943 // to (a) or (b) -- and greatly reduces coherence traffic.
944 // We might also condition (c) on the magnitude of the delta between obsv and now.
945 // Avoiding excessive CAS operations to hot RW locations is critical.
946 // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
947 return (prev == obsv) ? now : obsv;
948 }
949
950 #else // __APPLE__
951
952 jlong os::javaTimeNanos() {
953 if (os::supports_monotonic_clock()) {
1816
1817 // Initialize signal semaphore
1818 sig_sem = new Semaphore();
1819 }
1820
// Record one pending occurrence of "sig" and wake the dispatcher waiting
// on the signal semaphore (see check_pending_signals(), which wait()s on
// sig_sem and decrements pending_signals via CAS).
1821 void os::signal_notify(int sig) {
1822 if (sig_sem != NULL) {
// Bump the counter before posting the semaphore so a woken waiter is
// guaranteed to find a pending signal.
1823 Atomic::inc(&pending_signals[sig]);
1824 sig_sem->signal();
1825 } else {
1826 // Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
1827 // initialization isn't called.
1828 assert(ReduceSignalUsage, "signal semaphore should be created");
1829 }
1830 }
1831
1832 static int check_pending_signals() {
1833 for (;;) {
1834 for (int i = 0; i < NSIG + 1; i++) {
1835 jint n = pending_signals[i];
1836 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1837 return i;
1838 }
1839 }
1840 JavaThread *thread = JavaThread::current();
1841 ThreadBlockInVM tbivm(thread);
1842
1843 bool threadIsSuspended;
1844 do {
1845 thread->set_suspend_equivalent();
1846 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1847 sig_sem->wait();
1848
1849 // were we externally suspended while we were waiting?
1850 threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1851 if (threadIsSuspended) {
1852 // The semaphore has been incremented, but while we were waiting
1853 // another thread suspended us. We don't want to continue running
1854 // while suspended because that would surprise the thread that
1855 // suspended us.
1856 sig_sem->signal();
3220 uint edx;
3221
3222 __asm__ ("cpuid\n\t" : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx) : );
3223
3224 uint level_type = (ecx >> 8) & 0xFF;
3225 if (level_type == 0) {
3226 // Invalid level; end of topology
3227 break;
3228 }
3229 uint level_apic_id_shift = eax & ((1u << 5) - 1);
3230 total_bits += level_apic_id_shift;
3231 }
3232
3233 uint max_apic_ids = 1u << total_bits;
3234 mapping = NEW_C_HEAP_ARRAY(int, max_apic_ids, mtInternal);
3235
3236 for (uint i = 0; i < max_apic_ids; ++i) {
3237 mapping[i] = -1;
3238 }
3239
3240 if (!Atomic::replace_if_null(mapping, &apic_to_processor_mapping)) {
3241 FREE_C_HEAP_ARRAY(int, mapping);
3242 mapping = Atomic::load_acquire(&apic_to_processor_mapping);
3243 }
3244 }
3245
3246 return mapping;
3247 }
3248
// Return a small, stable logical processor id in [0, num_processors) for
// the processor the calling thread is currently running on, derived from
// the APIC id reported by CPUID leaf 0xB (EDX).
3249 uint os::processor_id() {
3250 volatile int* mapping = get_apic_to_processor_mapping();
3251
// CPUID leaf 0xB, sub-leaf 0: EDX holds the current logical CPU's APIC id.
3252 uint eax = 0xb;
3253 uint ebx;
3254 uint ecx = 0;
3255 uint edx;
3256
3257 __asm__ ("cpuid\n\t" : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx) : );
3258
3259 // Map from APIC id to a unique logical processor ID in the expected
3260 // [0, num_processors) range.
3261
3262 uint apic_id = edx;
3263 int processor_id = Atomic::load(&mapping[apic_id]);
3264
// Slot states: -1 = unclaimed, -2 = claimed (assignment in flight),
// >= 0 = assigned id. Claim the slot with a -1 -> -2 CAS; only the thread
// whose CAS actually observed -1 may assign an id.
// BUG FIX: cmpxchg returns the value *observed* at the destination, not a
// success flag. The previous truthiness test also passed when a racing
// thread had already claimed the slot (observed -2, non-zero), letting two
// threads assign ids: next_processor_id was bumped twice and an id already
// handed out could be overwritten. Compare explicitly against -1 so only
// the CAS winner assigns.
3265 while (processor_id < 0) {
3266 if (Atomic::cmpxchg(-2, &mapping[apic_id], -1) == -1) {
3267 Atomic::store(&mapping[apic_id], Atomic::add(&next_processor_id, 1) - 1);
3268 }
3269 processor_id = Atomic::load(&mapping[apic_id]);
3270 }
3271
3272 return (uint)processor_id;
3273 }
3274 #endif
3275
// Set the OS-visible name of the current thread. Only implemented when
// targeting macOS 10.6+ (pthread_setname_np availability); otherwise a
// no-op. The name is prefixed with "Java: " to distinguish VM threads in
// native tools.
3276 void os::set_native_thread_name(const char *name) {
3277 #if defined(__APPLE__) && MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_5
3278 // This is only supported in Snow Leopard and beyond
3279 if (name != NULL) {
3280 // Add a "Java: " prefix to the name
3281 char buf[MAXTHREADNAMESIZE];
// snprintf silently truncates names longer than MAXTHREADNAMESIZE - 1.
3282 snprintf(buf, sizeof(buf), "Java: %s", name);
// macOS's single-argument pthread_setname_np names the calling thread.
3283 pthread_setname_np(buf);
3284 }
3285 #endif
3286 }
|
913 struct timespec tp;
914 if (::clock_getres(CLOCK_MONOTONIC, &res) == 0 &&
915 ::clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
916 // yes, monotonic clock is supported
917 _clock_gettime = ::clock_gettime;
918 }
919 }
920 #endif
921
922
923
924 #ifdef __APPLE__
925
// macOS monotonic clock: javaTimeNanos() is built on mach_absolute_time().
// The raw tick count is scaled to nanoseconds using the cached timebase
// (numer/denom) and then clamped against a process-global high-water mark
// (Bsd::_max_abstime) so the returned value never moves backwards, even
// when observed across threads.
926 jlong os::javaTimeNanos() {
927 const uint64_t tm = mach_absolute_time();
// Scale ticks -> nanoseconds with the timebase cached in Bsd::_timebase_info.
928 const uint64_t now = (tm * Bsd::_timebase_info.numer) / Bsd::_timebase_info.denom;
// Racy (plain) read of the published maximum; the CAS below revalidates it.
929 const uint64_t prev = Bsd::_max_abstime;
930 if (now <= prev) {
931 return prev; // same or retrograde time;
932 }
// Atomic::cmpxchg(dest, compare, new): returns the value observed at dest
// — "prev" on success, a newer value installed by a racing thread on
// failure.
933 const uint64_t obsv = Atomic::cmpxchg(&Bsd::_max_abstime, prev, now);
934 assert(obsv >= prev, "invariant"); // Monotonicity
935 // If the CAS succeeded then we're done and return "now".
936 // If the CAS failed and the observed value "obsv" is >= now then
937 // we should return "obsv". If the CAS failed and now > obsv > prev then
938 // some other thread raced this thread and installed a new value, in which case
939 // we could either (a) retry the entire operation, (b) retry trying to install now
940 // or (c) just return obsv. We use (c). No loop is required although in some cases
941 // we might discard a higher "now" value in deference to a slightly lower but freshly
942 // installed obsv value. That's entirely benign -- it admits no new orderings compared
943 // to (a) or (b) -- and greatly reduces coherence traffic.
944 // We might also condition (c) on the magnitude of the delta between obsv and now.
945 // Avoiding excessive CAS operations to hot RW locations is critical.
946 // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
947 return (prev == obsv) ? now : obsv;
948 }
949
950 #else // __APPLE__
951
952 jlong os::javaTimeNanos() {
953 if (os::supports_monotonic_clock()) {
1816
1817 // Initialize signal semaphore
1818 sig_sem = new Semaphore();
1819 }
1820
// Record one pending occurrence of "sig" and wake the dispatcher waiting
// on the signal semaphore (see check_pending_signals(), which wait()s on
// sig_sem and decrements pending_signals via CAS).
1821 void os::signal_notify(int sig) {
1822 if (sig_sem != NULL) {
// Bump the counter before posting the semaphore so a woken waiter is
// guaranteed to find a pending signal.
1823 Atomic::inc(&pending_signals[sig]);
1824 sig_sem->signal();
1825 } else {
1826 // Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
1827 // initialization isn't called.
1828 assert(ReduceSignalUsage, "signal semaphore should be created");
1829 }
1830 }
1831
1832 static int check_pending_signals() {
1833 for (;;) {
1834 for (int i = 0; i < NSIG + 1; i++) {
1835 jint n = pending_signals[i];
1836 if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
1837 return i;
1838 }
1839 }
1840 JavaThread *thread = JavaThread::current();
1841 ThreadBlockInVM tbivm(thread);
1842
1843 bool threadIsSuspended;
1844 do {
1845 thread->set_suspend_equivalent();
1846 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1847 sig_sem->wait();
1848
1849 // were we externally suspended while we were waiting?
1850 threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1851 if (threadIsSuspended) {
1852 // The semaphore has been incremented, but while we were waiting
1853 // another thread suspended us. We don't want to continue running
1854 // while suspended because that would surprise the thread that
1855 // suspended us.
1856 sig_sem->signal();
3220 uint edx;
3221
3222 __asm__ ("cpuid\n\t" : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx) : );
3223
3224 uint level_type = (ecx >> 8) & 0xFF;
3225 if (level_type == 0) {
3226 // Invalid level; end of topology
3227 break;
3228 }
3229 uint level_apic_id_shift = eax & ((1u << 5) - 1);
3230 total_bits += level_apic_id_shift;
3231 }
3232
3233 uint max_apic_ids = 1u << total_bits;
3234 mapping = NEW_C_HEAP_ARRAY(int, max_apic_ids, mtInternal);
3235
3236 for (uint i = 0; i < max_apic_ids; ++i) {
3237 mapping[i] = -1;
3238 }
3239
3240 if (!Atomic::replace_if_null(&apic_to_processor_mapping, mapping)) {
3241 FREE_C_HEAP_ARRAY(int, mapping);
3242 mapping = Atomic::load_acquire(&apic_to_processor_mapping);
3243 }
3244 }
3245
3246 return mapping;
3247 }
3248
// Return a small, stable logical processor id in [0, num_processors) for
// the processor the calling thread is currently running on, derived from
// the APIC id reported by CPUID leaf 0xB (EDX).
3249 uint os::processor_id() {
3250 volatile int* mapping = get_apic_to_processor_mapping();
3251
// CPUID leaf 0xB, sub-leaf 0: EDX holds the current logical CPU's APIC id.
3252 uint eax = 0xb;
3253 uint ebx;
3254 uint ecx = 0;
3255 uint edx;
3256
3257 __asm__ ("cpuid\n\t" : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx) : );
3258
3259 // Map from APIC id to a unique logical processor ID in the expected
3260 // [0, num_processors) range.
3261
3262 uint apic_id = edx;
3263 int processor_id = Atomic::load(&mapping[apic_id]);
3264
// Slot states: -1 = unclaimed, -2 = claimed (assignment in flight),
// >= 0 = assigned id. Claim the slot with a -1 -> -2 CAS; only the thread
// whose CAS actually observed -1 may assign an id.
// BUG FIX: cmpxchg returns the value *observed* at the destination, not a
// success flag. The previous truthiness test also passed when a racing
// thread had already claimed the slot (observed -2, non-zero), letting two
// threads assign ids: next_processor_id was bumped twice and an id already
// handed out could be overwritten. Compare explicitly against -1 so only
// the CAS winner assigns.
3265 while (processor_id < 0) {
3266 if (Atomic::cmpxchg(&mapping[apic_id], -1, -2) == -1) {
3267 Atomic::store(&mapping[apic_id], Atomic::add(&next_processor_id, 1) - 1);
3268 }
3269 processor_id = Atomic::load(&mapping[apic_id]);
3270 }
3271
3272 return (uint)processor_id;
3273 }
3274 #endif
3275
// Set the OS-visible name of the current thread. Only implemented when
// targeting macOS 10.6+ (pthread_setname_np availability); otherwise a
// no-op. The name is prefixed with "Java: " to distinguish VM threads in
// native tools.
3276 void os::set_native_thread_name(const char *name) {
3277 #if defined(__APPLE__) && MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_5
3278 // This is only supported in Snow Leopard and beyond
3279 if (name != NULL) {
3280 // Add a "Java: " prefix to the name
3281 char buf[MAXTHREADNAMESIZE];
// snprintf silently truncates names longer than MAXTHREADNAMESIZE - 1.
3282 snprintf(buf, sizeof(buf), "Java: %s", name);
// macOS's single-argument pthread_setname_np names the calling thread.
3283 pthread_setname_np(buf);
3284 }
3285 #endif
3286 }
|