src/hotspot/share/prims/jni.cpp


*** 271,281 ****
  JNIHistogramElement::JNIHistogramElement(const char* elementName) {
    _name = elementName;
    uintx count = 0;

!   while (Atomic::cmpxchg(1, &JNIHistogram_lock, 0) != 0) {
      while (Atomic::load_acquire(&JNIHistogram_lock) != 0) {
        count +=1;
        if ( (WarnOnStalledSpinLock > 0)
          && (count % WarnOnStalledSpinLock == 0)) {
          warning("JNIHistogram_lock seems to be stalled");
--- 271,281 ----
  JNIHistogramElement::JNIHistogramElement(const char* elementName) {
    _name = elementName;
    uintx count = 0;

!   while (Atomic::cmpxchg(&JNIHistogram_lock, 0, 1) != 0) {
      while (Atomic::load_acquire(&JNIHistogram_lock) != 0) {
        count +=1;
        if ( (WarnOnStalledSpinLock > 0)
          && (count % WarnOnStalledSpinLock == 0)) {
          warning("JNIHistogram_lock seems to be stalled");
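
Note: this hunk shows the reordered Atomic::cmpxchg, which now takes the destination first: cmpxchg(dest, compare_value, exchange_value), and still returns the value previously held in dest. Below is a minimal sketch of the same spin-acquire pattern under the new order; it assumes the post-change HotSpot Atomic API (runtime/atomic.hpp), so it only builds inside the HotSpot tree, and the lock variable name is illustrative.

    #include "runtime/atomic.hpp"

    // Illustrative spin-lock word, analogous to JNIHistogram_lock: 0 = free, 1 = held.
    static volatile int example_lock = 0;

    static void example_spin_acquire() {
      // New argument order: destination, compare value, exchange value.
      // A return value of 0 means we swapped 0 -> 1 and now own the lock.
      while (Atomic::cmpxchg(&example_lock, 0, 1) != 0) {
        // Spin read-only until the holder resets the word to 0
        // (the owner releases with a release-ordered store of 0, not shown here).
        while (Atomic::load_acquire(&example_lock) != 0) {
          // busy-wait
        }
      }
    }
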
*** 3231,3241 ****
  static bool initializeDirectBufferSupport(JNIEnv* env, JavaThread* thread) {
    if (directBufferSupportInitializeFailed) {
      return false;
    }

!   if (Atomic::cmpxchg(1, &directBufferSupportInitializeStarted, 0) == 0) {
      if (!lookupDirectBufferClasses(env)) {
        directBufferSupportInitializeFailed = 1;
        return false;
      }
--- 3231,3241 ----
  static bool initializeDirectBufferSupport(JNIEnv* env, JavaThread* thread) {
    if (directBufferSupportInitializeFailed) {
      return false;
    }

!   if (Atomic::cmpxchg(&directBufferSupportInitializeStarted, 0, 1) == 0) {
      if (!lookupDirectBufferClasses(env)) {
        directBufferSupportInitializeFailed = 1;
        return false;
      }
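
The same destination-first reordering applies to this one-shot initialization guard: the single thread that swaps the started flag from 0 to 1 performs the setup, and a sticky failure flag short-circuits later callers. A condensed sketch of that pattern with the new argument order follows; the flag names and do_setup() are illustrative stand-ins, not the real jni.cpp globals and helpers.

    #include "runtime/atomic.hpp"

    static volatile int example_init_started = 0;   // 0 = not claimed, 1 = claimed
    static volatile int example_init_failed  = 0;   // sticky failure flag

    // Hypothetical setup step standing in for lookupDirectBufferClasses().
    static bool do_setup() { return true; }

    static bool example_initialize_once() {
      if (example_init_failed) {
        return false;                               // an earlier attempt already failed
      }
      // Destination first: only the thread that swaps 0 -> 1 runs the setup.
      if (Atomic::cmpxchg(&example_init_started, 0, 1) == 0) {
        if (!do_setup()) {
          example_init_failed = 1;                  // publish the failure
          return false;
        }
        return true;
      }
      // Another thread claimed initialization; the real code waits for it to finish.
      return example_init_failed == 0;
    }
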
*** 3687,3697 ****
  void copy_jni_function_table(const struct JNINativeInterface_ *new_jni_NativeInterface) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
    intptr_t *a = (intptr_t *) jni_functions();
    intptr_t *b = (intptr_t *) new_jni_NativeInterface;
    for (uint i=0; i < sizeof(struct JNINativeInterface_)/sizeof(void *); i++) {
!     Atomic::store(*b++, a++);
    }
  }

  void quicken_jni_functions() {
    // Replace Get<Primitive>Field with fast versions
--- 3687,3697 ----
  void copy_jni_function_table(const struct JNINativeInterface_ *new_jni_NativeInterface) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
    intptr_t *a = (intptr_t *) jni_functions();
    intptr_t *b = (intptr_t *) new_jni_NativeInterface;
    for (uint i=0; i < sizeof(struct JNINativeInterface_)/sizeof(void *); i++) {
!     Atomic::store(a++, *b++);
    }
  }

  void quicken_jni_functions() {
    // Replace Get<Primitive>Field with fast versions
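
Only the argument order of Atomic::store changes here: the destination pointer is now the first argument and the value the second, so the loop still publishes the new function table one word at a time while the VM is at a safepoint. A small sketch of that word-wise copy under the new Atomic::store(dest, value) shape; the table type and function names are illustrative.

    #include "runtime/atomic.hpp"

    // Illustrative pointer table standing in for JNINativeInterface_.
    struct ExampleTable {
      void* slots[4];
    };

    // Copy 'src' into the live table word by word, as the loop above does.
    static void example_copy_table(ExampleTable* live, ExampleTable* src) {
      intptr_t* a = (intptr_t*) live;   // destination words
      intptr_t* b = (intptr_t*) src;    // source words
      for (size_t i = 0; i < sizeof(ExampleTable) / sizeof(void*); i++) {
        Atomic::store(a++, *b++);       // new order: Atomic::store(destination, value)
      }
    }
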
*** 3809,3821 ****
    // but __sync_lock_test_and_set is not guaranteed to do what we want
    // on all architectures. So we check it works before relying on it.
  #if defined(ZERO) && defined(ASSERT)
    {
      jint a = 0xcafebabe;
!     jint b = Atomic::xchg((jint) 0xdeadbeef, &a);
      void *c = &a;
!     void *d = Atomic::xchg(&b, &c);
      assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works");
      assert(c == &b && d == &a, "Atomic::xchg() works");
    }
  #endif // ZERO && ASSERT
--- 3809,3821 ----
    // but __sync_lock_test_and_set is not guaranteed to do what we want
    // on all architectures. So we check it works before relying on it.
  #if defined(ZERO) && defined(ASSERT)
    {
      jint a = 0xcafebabe;
!     jint b = Atomic::xchg(&a, (jint) 0xdeadbeef);
      void *c = &a;
!     void *d = Atomic::xchg(&c, &b);
      assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works");
      assert(c == &b && d == &a, "Atomic::xchg() works");
    }
  #endif // ZERO && ASSERT
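
This ZERO-build self-test exercises the reordered Atomic::xchg: the destination now comes first, the new value second, and the call still returns the previous contents of the destination. The same check written out as a small routine with comments; it assumes a HotSpot debug build (assert from utilities/debug.hpp) and the post-change signature.

    #include "jni.h"                  // jint
    #include "runtime/atomic.hpp"
    #include "utilities/debug.hpp"    // assert() in debug builds

    static void example_xchg_self_test() {
      jint a = 0xcafebabe;
      // New order: xchg(destination, new_value); the old value is returned.
      jint b = Atomic::xchg(&a, (jint) 0xdeadbeef);

      void *c = &a;
      // The pointer form follows the same destination-first convention.
      void *d = Atomic::xchg(&c, &b);

      assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works");
      assert(c == &b && d == &a, "Atomic::xchg() works");
    }
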
*** 3827,3840 ****
    // one thread can call this method at a time
    // We use Atomic::xchg rather than Atomic::add/dec since on some platforms
    // the add/dec implementations are dependent on whether we are running
    // on a multiprocessor Atomic::xchg does not have this problem.
!   if (Atomic::xchg(1, &vm_created) == 1) {
      return JNI_EEXIST;   // already created, or create attempt in progress
    }
!   if (Atomic::xchg(0, &safe_to_recreate_vm) == 0) {
      return JNI_ERR;  // someone tried and failed and retry not allowed.
    }

    assert(vm_created == 1, "vm_created is true during the creation");
--- 3827,3840 ----
    // one thread can call this method at a time
    // We use Atomic::xchg rather than Atomic::add/dec since on some platforms
    // the add/dec implementations are dependent on whether we are running
    // on a multiprocessor Atomic::xchg does not have this problem.
!   if (Atomic::xchg(&vm_created, 1) == 1) {
      return JNI_EEXIST;   // already created, or create attempt in progress
    }
!   if (Atomic::xchg(&safe_to_recreate_vm, 0) == 0) {
      return JNI_ERR;  // someone tried and failed and retry not allowed.
    }

    assert(vm_created == 1, "vm_created is true during the creation");
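
Both guards rely on Atomic::xchg returning the previous value: swapping 1 into vm_created detects an existing or in-progress creation, and swapping 0 into safe_to_recreate_vm detects that a failed attempt has already ruled out a retry. A compact sketch of that create-once gate with the new destination-first order; the flag and function names are illustrative rather than the real jni.cpp globals.

    #include "jni.h"                  // JNI_OK, JNI_ERR, JNI_EEXIST
    #include "runtime/atomic.hpp"

    static volatile int example_created          = 0;  // set to 1 once creation has been attempted
    static volatile int example_safe_to_recreate = 1;  // cleared after a failed attempt

    static jint example_try_create() {
      // xchg returns the old value: 1 means a VM was already created (or is being created).
      if (Atomic::xchg(&example_created, 1) == 1) {
        return JNI_EEXIST;
      }
      // A previous failed attempt left this at 0, so retrying is not allowed.
      if (Atomic::xchg(&example_safe_to_recreate, 0) == 0) {
        return JNI_ERR;
      }
      // ... perform the actual creation here ...
      return JNI_OK;
    }
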