< prev index next >

src/hotspot/share/prims/jni.cpp

Print this page




 256   }
 257   guarantee(InstanceKlass::cast(k)->contains_field_offset(offset),
 258       "Bug in native code: jfieldID offset must address interior of object");
 259 }
 260 
 261 // Wrapper to trace JNI functions
 262 
 263 #ifdef ASSERT
 264   Histogram* JNIHistogram;
 265   static volatile int JNIHistogram_lock = 0;
 266 
 267   class JNIHistogramElement : public HistogramElement {
 268     public:
 269      JNIHistogramElement(const char* name);
 270   };
 271 
 272   JNIHistogramElement::JNIHistogramElement(const char* elementName) {
 273     _name = elementName;
 274     uintx count = 0;
 275 
 276     while (Atomic::cmpxchg(1, &JNIHistogram_lock, 0) != 0) {
 277       while (Atomic::load_acquire(&JNIHistogram_lock) != 0) {
 278         count +=1;
 279         if ( (WarnOnStalledSpinLock > 0)
 280           && (count % WarnOnStalledSpinLock == 0)) {
 281           warning("JNIHistogram_lock seems to be stalled");
 282         }
 283       }
 284      }
 285 
 286 
 287     if(JNIHistogram == NULL)
 288       JNIHistogram = new Histogram("JNI Call Counts",100);
 289 
 290     JNIHistogram->add_element(this);
 291     Atomic::dec(&JNIHistogram_lock);
 292   }
 293 
 294   #define JNICountWrapper(arg)                                     \
 295      static JNIHistogramElement* e = new JNIHistogramElement(arg); \
 296       /* There is a MT-race condition in VC++. So we need to make sure that e has been initialized */ \


3216   return result;
3217 }
3218 
3219 // These lookups are done with the NULL (bootstrap) ClassLoader to
3220 // circumvent any security checks that would be done by jni_FindClass.
3221 JNI_ENTRY(bool, lookupDirectBufferClasses(JNIEnv* env))
3222 {
3223   if ((bufferClass           = lookupOne(env, "java/nio/Buffer", thread))           == NULL) { return false; }
3224   if ((directBufferClass     = lookupOne(env, "sun/nio/ch/DirectBuffer", thread))   == NULL) { return false; }
3225   if ((directByteBufferClass = lookupOne(env, "java/nio/DirectByteBuffer", thread)) == NULL) { return false; }
3226   return true;
3227 }
3228 JNI_END
3229 
3230 
3231 static bool initializeDirectBufferSupport(JNIEnv* env, JavaThread* thread) {
3232   if (directBufferSupportInitializeFailed) {
3233     return false;
3234   }
3235 
3236   if (Atomic::cmpxchg(1, &directBufferSupportInitializeStarted, 0) == 0) {
3237     if (!lookupDirectBufferClasses(env)) {
3238       directBufferSupportInitializeFailed = 1;
3239       return false;
3240     }
3241 
3242     // Make global references for these
3243     bufferClass           = (jclass) env->NewGlobalRef(bufferClass);
3244     directBufferClass     = (jclass) env->NewGlobalRef(directBufferClass);
3245     directByteBufferClass = (jclass) env->NewGlobalRef(directByteBufferClass);
3246 
3247     // Get needed field and method IDs
3248     directByteBufferConstructor = env->GetMethodID(directByteBufferClass, "<init>", "(JI)V");
3249     if (env->ExceptionCheck()) {
3250       env->ExceptionClear();
3251       directBufferSupportInitializeFailed = 1;
3252       return false;
3253     }
3254     directBufferAddressField    = env->GetFieldID(bufferClass, "address", "J");
3255     if (env->ExceptionCheck()) {
3256       env->ExceptionClear();


3672 
3673     jni_GetObjectRefType,
3674 
3675     // Module features
3676 
3677     jni_GetModule
3678 };
3679 
3680 
3681 // For jvmti use to modify jni function table.
 3682 // Java threads in native continue to run until they are transitioned
 3683 // to VM at a safepoint. Before the transition, or before it is blocked
 3684 // for a safepoint, a thread may access the jni function table. The VM
 3685 // could crash if any java thread accesses the jni function table in the
 3686 // middle of a memcpy. To avoid this, each function pointer is copied atomically.
3687 void copy_jni_function_table(const struct JNINativeInterface_ *new_jni_NativeInterface) {
3688   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
3689   intptr_t *a = (intptr_t *) jni_functions();
3690   intptr_t *b = (intptr_t *) new_jni_NativeInterface;
3691   for (uint i=0; i <  sizeof(struct JNINativeInterface_)/sizeof(void *); i++) {
3692     Atomic::store(*b++, a++);
3693   }
3694 }
3695 
3696 void quicken_jni_functions() {
3697   // Replace Get<Primitive>Field with fast versions
3698   if (UseFastJNIAccessors && !VerifyJNIFields && !CountJNICalls && !CheckJNICalls) {
3699     address func;
3700     func = JNI_FastGetField::generate_fast_get_boolean_field();
3701     if (func != (address)-1) {
3702       jni_NativeInterface.GetBooleanField = (GetBooleanField_t)func;
3703     }
3704     func = JNI_FastGetField::generate_fast_get_byte_field();
3705     if (func != (address)-1) {
3706       jni_NativeInterface.GetByteField = (GetByteField_t)func;
3707     }
3708     func = JNI_FastGetField::generate_fast_get_char_field();
3709     if (func != (address)-1) {
3710       jni_NativeInterface.GetCharField = (GetCharField_t)func;
3711     }
3712     func = JNI_FastGetField::generate_fast_get_short_field();


3794   }
3795   return ret;
3796 }
3797 
3798 DT_RETURN_MARK_DECL(CreateJavaVM, jint
3799                     , HOTSPOT_JNI_CREATEJAVAVM_RETURN(_ret_ref));
3800 
3801 static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) {
3802   HOTSPOT_JNI_CREATEJAVAVM_ENTRY((void **) vm, penv, args);
3803 
3804   jint result = JNI_ERR;
3805   DT_RETURN_MARK(CreateJavaVM, jint, (const jint&)result);
3806 
3807   // We're about to use Atomic::xchg for synchronization.  Some Zero
3808   // platforms use the GCC builtin __sync_lock_test_and_set for this,
3809   // but __sync_lock_test_and_set is not guaranteed to do what we want
3810   // on all architectures.  So we check it works before relying on it.
3811 #if defined(ZERO) && defined(ASSERT)
3812   {
3813     jint a = 0xcafebabe;
3814     jint b = Atomic::xchg((jint) 0xdeadbeef, &a);
3815     void *c = &a;
3816     void *d = Atomic::xchg(&b, &c);
3817     assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works");
3818     assert(c == &b && d == &a, "Atomic::xchg() works");
3819   }
3820 #endif // ZERO && ASSERT
3821 
3822   // At the moment it's only possible to have one Java VM,
3823   // since some of the runtime state is in global variables.
3824 
3825   // We cannot use our mutex locks here, since they only work on
3826   // Threads. We do an atomic compare and exchange to ensure only
3827   // one thread can call this method at a time
3828 
3829   // We use Atomic::xchg rather than Atomic::add/dec since on some platforms
3830   // the add/dec implementations are dependent on whether we are running
3831   // on a multiprocessor Atomic::xchg does not have this problem.
3832   if (Atomic::xchg(1, &vm_created) == 1) {
3833     return JNI_EEXIST;   // already created, or create attempt in progress
3834   }
3835   if (Atomic::xchg(0, &safe_to_recreate_vm) == 0) {
3836     return JNI_ERR;  // someone tried and failed and retry not allowed.
3837   }
3838 
3839   assert(vm_created == 1, "vm_created is true during the creation");
3840 
3841   /**
3842    * Certain errors during initialization are recoverable and do not
3843    * prevent this method from being called again at a later time
3844    * (perhaps with different arguments).  However, at a certain
3845    * point during initialization if an error occurs we cannot allow
3846    * this function to be called again (or it will crash).  In those
3847    * situations, the 'canTryAgain' flag is set to false, which atomically
3848    * sets safe_to_recreate_vm to 1, such that any new call to
3849    * JNI_CreateJavaVM will immediately fail using the above logic.
3850    */
3851   bool can_try_again = true;
3852 
3853   result = Threads::create_vm((JavaVMInitArgs*) args, &can_try_again);
3854   if (result == JNI_OK) {
3855     JavaThread *thread = JavaThread::current();




 256   }
 257   guarantee(InstanceKlass::cast(k)->contains_field_offset(offset),
 258       "Bug in native code: jfieldID offset must address interior of object");
 259 }
 260 
 261 // Wrapper to trace JNI functions
 262 
 263 #ifdef ASSERT
 264   Histogram* JNIHistogram;
 265   static volatile int JNIHistogram_lock = 0;
 266 
 267   class JNIHistogramElement : public HistogramElement {
 268     public:
 269      JNIHistogramElement(const char* name);
 270   };
 271 
 272   JNIHistogramElement::JNIHistogramElement(const char* elementName) {
 273     _name = elementName;
 274     uintx count = 0;
 275 
 276     while (Atomic::cmpxchg(&JNIHistogram_lock, 0, 1) != 0) {
 277       while (Atomic::load_acquire(&JNIHistogram_lock) != 0) {
 278         count +=1;
 279         if ( (WarnOnStalledSpinLock > 0)
 280           && (count % WarnOnStalledSpinLock == 0)) {
 281           warning("JNIHistogram_lock seems to be stalled");
 282         }
 283       }
 284      }
 285 
 286 
 287     if(JNIHistogram == NULL)
 288       JNIHistogram = new Histogram("JNI Call Counts",100);
 289 
 290     JNIHistogram->add_element(this);
 291     Atomic::dec(&JNIHistogram_lock);
 292   }
 293 
 294   #define JNICountWrapper(arg)                                     \
 295      static JNIHistogramElement* e = new JNIHistogramElement(arg); \
 296       /* There is a MT-race condition in VC++. So we need to make sure that e has been initialized */ \


3216   return result;
3217 }
3218 
3219 // These lookups are done with the NULL (bootstrap) ClassLoader to
3220 // circumvent any security checks that would be done by jni_FindClass.
3221 JNI_ENTRY(bool, lookupDirectBufferClasses(JNIEnv* env))
3222 {
3223   if ((bufferClass           = lookupOne(env, "java/nio/Buffer", thread))           == NULL) { return false; }
3224   if ((directBufferClass     = lookupOne(env, "sun/nio/ch/DirectBuffer", thread))   == NULL) { return false; }
3225   if ((directByteBufferClass = lookupOne(env, "java/nio/DirectByteBuffer", thread)) == NULL) { return false; }
3226   return true;
3227 }
3228 JNI_END
3229 
3230 
3231 static bool initializeDirectBufferSupport(JNIEnv* env, JavaThread* thread) {
3232   if (directBufferSupportInitializeFailed) {
3233     return false;
3234   }
3235 
3236   if (Atomic::cmpxchg(&directBufferSupportInitializeStarted, 0, 1) == 0) {
3237     if (!lookupDirectBufferClasses(env)) {
3238       directBufferSupportInitializeFailed = 1;
3239       return false;
3240     }
3241 
3242     // Make global references for these
3243     bufferClass           = (jclass) env->NewGlobalRef(bufferClass);
3244     directBufferClass     = (jclass) env->NewGlobalRef(directBufferClass);
3245     directByteBufferClass = (jclass) env->NewGlobalRef(directByteBufferClass);
3246 
3247     // Get needed field and method IDs
3248     directByteBufferConstructor = env->GetMethodID(directByteBufferClass, "<init>", "(JI)V");
3249     if (env->ExceptionCheck()) {
3250       env->ExceptionClear();
3251       directBufferSupportInitializeFailed = 1;
3252       return false;
3253     }
3254     directBufferAddressField    = env->GetFieldID(bufferClass, "address", "J");
3255     if (env->ExceptionCheck()) {
3256       env->ExceptionClear();


3672 
3673     jni_GetObjectRefType,
3674 
3675     // Module features
3676 
3677     jni_GetModule
3678 };
3679 
3680 
3681 // For jvmti use to modify jni function table.
 3682 // Java threads in native continue to run until they are transitioned
 3683 // to VM at a safepoint. Before the transition, or before it is blocked
 3684 // for a safepoint, a thread may access the jni function table. The VM
 3685 // could crash if any java thread accesses the jni function table in the
 3686 // middle of a memcpy. To avoid this, each function pointer is copied atomically.
3687 void copy_jni_function_table(const struct JNINativeInterface_ *new_jni_NativeInterface) {
3688   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
3689   intptr_t *a = (intptr_t *) jni_functions();
3690   intptr_t *b = (intptr_t *) new_jni_NativeInterface;
3691   for (uint i=0; i <  sizeof(struct JNINativeInterface_)/sizeof(void *); i++) {
3692     Atomic::store(a++, *b++);
3693   }
3694 }
3695 
3696 void quicken_jni_functions() {
3697   // Replace Get<Primitive>Field with fast versions
3698   if (UseFastJNIAccessors && !VerifyJNIFields && !CountJNICalls && !CheckJNICalls) {
3699     address func;
3700     func = JNI_FastGetField::generate_fast_get_boolean_field();
3701     if (func != (address)-1) {
3702       jni_NativeInterface.GetBooleanField = (GetBooleanField_t)func;
3703     }
3704     func = JNI_FastGetField::generate_fast_get_byte_field();
3705     if (func != (address)-1) {
3706       jni_NativeInterface.GetByteField = (GetByteField_t)func;
3707     }
3708     func = JNI_FastGetField::generate_fast_get_char_field();
3709     if (func != (address)-1) {
3710       jni_NativeInterface.GetCharField = (GetCharField_t)func;
3711     }
3712     func = JNI_FastGetField::generate_fast_get_short_field();


3794   }
3795   return ret;
3796 }
3797 
3798 DT_RETURN_MARK_DECL(CreateJavaVM, jint
3799                     , HOTSPOT_JNI_CREATEJAVAVM_RETURN(_ret_ref));
3800 
3801 static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) {
3802   HOTSPOT_JNI_CREATEJAVAVM_ENTRY((void **) vm, penv, args);
3803 
3804   jint result = JNI_ERR;
3805   DT_RETURN_MARK(CreateJavaVM, jint, (const jint&)result);
3806 
3807   // We're about to use Atomic::xchg for synchronization.  Some Zero
3808   // platforms use the GCC builtin __sync_lock_test_and_set for this,
3809   // but __sync_lock_test_and_set is not guaranteed to do what we want
3810   // on all architectures.  So we check it works before relying on it.
3811 #if defined(ZERO) && defined(ASSERT)
3812   {
3813     jint a = 0xcafebabe;
3814     jint b = Atomic::xchg(&a, (jint) 0xdeadbeef);
3815     void *c = &a;
3816     void *d = Atomic::xchg(&c, &b);
3817     assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works");
3818     assert(c == &b && d == &a, "Atomic::xchg() works");
3819   }
3820 #endif // ZERO && ASSERT
3821 
3822   // At the moment it's only possible to have one Java VM,
3823   // since some of the runtime state is in global variables.
3824 
3825   // We cannot use our mutex locks here, since they only work on
3826   // Threads. We do an atomic compare and exchange to ensure only
3827   // one thread can call this method at a time
3828 
3829   // We use Atomic::xchg rather than Atomic::add/dec since on some platforms
3830   // the add/dec implementations are dependent on whether we are running
3831   // on a multiprocessor Atomic::xchg does not have this problem.
3832   if (Atomic::xchg(&vm_created, 1) == 1) {
3833     return JNI_EEXIST;   // already created, or create attempt in progress
3834   }
3835   if (Atomic::xchg(&safe_to_recreate_vm, 0) == 0) {
3836     return JNI_ERR;  // someone tried and failed and retry not allowed.
3837   }
3838 
3839   assert(vm_created == 1, "vm_created is true during the creation");
3840 
3841   /**
3842    * Certain errors during initialization are recoverable and do not
3843    * prevent this method from being called again at a later time
3844    * (perhaps with different arguments).  However, at a certain
3845    * point during initialization if an error occurs we cannot allow
3846    * this function to be called again (or it will crash).  In those
3847    * situations, the 'canTryAgain' flag is set to false, which atomically
3848    * sets safe_to_recreate_vm to 1, such that any new call to
3849    * JNI_CreateJavaVM will immediately fail using the above logic.
3850    */
3851   bool can_try_again = true;
3852 
3853   result = Threads::create_vm((JavaVMInitArgs*) args, &can_try_again);
3854   if (result == JNI_OK) {
3855     JavaThread *thread = JavaThread::current();


< prev index next >