
src/hotspot/share/oops/instanceKlass.cpp


*** 1095,1105 ****
    Klass* volatile* k = adr_implementor();
    if (k == NULL) {
      return NULL;
    } else {
      // This load races with inserts, and therefore needs acquire.
!     Klass* kls = OrderAccess::load_acquire(k);
      if (kls != NULL && !kls->is_loader_alive()) {
        return NULL;  // don't return unloaded class
      } else {
        return kls;
      }
--- 1095,1105 ----
    Klass* volatile* k = adr_implementor();
    if (k == NULL) {
      return NULL;
    } else {
      // This load races with inserts, and therefore needs acquire.
!     Klass* kls = Atomic::load_acquire(k);
      if (kls != NULL && !kls->is_loader_alive()) {
        return NULL;  // don't return unloaded class
      } else {
        return kls;
      }
*** 1111,1121 ****
    assert_locked_or_safepoint(Compile_lock);
    assert(is_interface(), "not interface");
    Klass* volatile* addr = adr_implementor();
    assert(addr != NULL, "null addr");
    if (addr != NULL) {
!     OrderAccess::release_store(addr, k);
    }
  }
  
  int InstanceKlass::nof_implementors() const {
    Klass* k = implementor();
--- 1111,1121 ----
    assert_locked_or_safepoint(Compile_lock);
    assert(is_interface(), "not interface");
    Klass* volatile* addr = adr_implementor();
    assert(addr != NULL, "null addr");
    if (addr != NULL) {
!     Atomic::release_store(addr, k);
    }
  }
  
  int InstanceKlass::nof_implementors() const {
    Klass* k = implementor();
*** 1368,1385 ****
  void InstanceKlass::mask_for(const methodHandle& method, int bci,
    InterpreterOopMap* entry_for) {
    // Lazily create the _oop_map_cache at first request
    // Lock-free access requires load_acquire.
!   OopMapCache* oop_map_cache = OrderAccess::load_acquire(&_oop_map_cache);
    if (oop_map_cache == NULL) {
      MutexLocker x(OopMapCacheAlloc_lock);
      // Check if _oop_map_cache was allocated while we were waiting for this lock
      if ((oop_map_cache = _oop_map_cache) == NULL) {
        oop_map_cache = new OopMapCache();
        // Ensure _oop_map_cache is stable, since it is examined without a lock
!       OrderAccess::release_store(&_oop_map_cache, oop_map_cache);
      }
    }
    // _oop_map_cache is constant after init; lookup below does its own locking.
    oop_map_cache->lookup(method, bci, entry_for);
  }
--- 1368,1385 ----
  void InstanceKlass::mask_for(const methodHandle& method, int bci,
    InterpreterOopMap* entry_for) {
    // Lazily create the _oop_map_cache at first request
    // Lock-free access requires load_acquire.
!   OopMapCache* oop_map_cache = Atomic::load_acquire(&_oop_map_cache);
    if (oop_map_cache == NULL) {
      MutexLocker x(OopMapCacheAlloc_lock);
      // Check if _oop_map_cache was allocated while we were waiting for this lock
      if ((oop_map_cache = _oop_map_cache) == NULL) {
        oop_map_cache = new OopMapCache();
        // Ensure _oop_map_cache is stable, since it is examined without a lock
!       Atomic::release_store(&_oop_map_cache, oop_map_cache);
      }
    }
    // _oop_map_cache is constant after init; lookup below does its own locking.
    oop_map_cache->lookup(method, bci, entry_for);
  }
*** 2112,2122 ****
      id = new_id;
  
      // The jmethodID cache can be read while unlocked so we have to
      // make sure the new jmethodID is complete before installing it
      // in the cache.
!     OrderAccess::release_store(&jmeths[idnum+1], id);
    } else {
      *to_dealloc_id_p = new_id; // save new id for later delete
    }
    return id;
  }
--- 2112,2122 ----
      id = new_id;
  
      // The jmethodID cache can be read while unlocked so we have to
      // make sure the new jmethodID is complete before installing it
      // in the cache.
!     Atomic::release_store(&jmeths[idnum+1], id);
    } else {
      *to_dealloc_id_p = new_id; // save new id for later delete
    }
    return id;
  }
*** 2194,2204 ****
    assert(is_loader_alive(), "this klass should be live");
    if (is_interface()) {
      assert (ClassUnloading, "only called for ClassUnloading");
      for (;;) {
        // Use load_acquire due to competing with inserts
!       Klass* impl = OrderAccess::load_acquire(adr_implementor());
        if (impl != NULL && !impl->is_loader_alive()) {
          // NULL this field, might be an unloaded klass or NULL
          Klass* volatile* klass = adr_implementor();
          if (Atomic::cmpxchg((Klass*)NULL, klass, impl) == impl) {
            // Successfully unlinking implementor.
--- 2194,2204 ----
    assert(is_loader_alive(), "this klass should be live");
    if (is_interface()) {
      assert (ClassUnloading, "only called for ClassUnloading");
      for (;;) {
        // Use load_acquire due to competing with inserts
!       Klass* impl = Atomic::load_acquire(adr_implementor());
        if (impl != NULL && !impl->is_loader_alive()) {
          // NULL this field, might be an unloaded klass or NULL
          Klass* volatile* klass = adr_implementor();
          if (Atomic::cmpxchg((Klass*)NULL, klass, impl) == impl) {
            // Successfully unlinking implementor.