src/hotspot/share/oops/instanceKlass.cpp

rev 47404 : [mq]: load_ptr_acquire
rev 47406 : [mq]: assembler_cmpxchg

*** 1107,1126 ****
  void InstanceKlass::mask_for(const methodHandle& method, int bci,
    InterpreterOopMap* entry_for) {
    // Lazily create the _oop_map_cache at first request
!   // Lock-free access requires load_ptr_acquire.
!   OopMapCache* oop_map_cache =
!     static_cast<OopMapCache*>(OrderAccess::load_ptr_acquire(&_oop_map_cache));
    if (oop_map_cache == NULL) {
      MutexLocker x(OopMapCacheAlloc_lock);
      // Check if _oop_map_cache was allocated while we were waiting for this lock
      if ((oop_map_cache = _oop_map_cache) == NULL) {
        oop_map_cache = new OopMapCache();
        // Ensure _oop_map_cache is stable, since it is examined without a lock
!       OrderAccess::release_store_ptr(&_oop_map_cache, oop_map_cache);
      }
    }
    // _oop_map_cache is constant after init; lookup below does its own locking.
    oop_map_cache->lookup(method, bci, entry_for);
  }
--- 1107,1125 ----
  void InstanceKlass::mask_for(const methodHandle& method, int bci,
    InterpreterOopMap* entry_for) {
    // Lazily create the _oop_map_cache at first request
!   // Lock-free access requires load_acquire.
!   OopMapCache* oop_map_cache = OrderAccess::load_acquire(&_oop_map_cache);
    if (oop_map_cache == NULL) {
      MutexLocker x(OopMapCacheAlloc_lock);
      // Check if _oop_map_cache was allocated while we were waiting for this lock
      if ((oop_map_cache = _oop_map_cache) == NULL) {
        oop_map_cache = new OopMapCache();
        // Ensure _oop_map_cache is stable, since it is examined without a lock
!       OrderAccess::release_store(&_oop_map_cache, oop_map_cache);
      }
    }
    // _oop_map_cache is constant after init; lookup below does its own locking.
    oop_map_cache->lookup(method, bci, entry_for);
  }
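For readers less familiar with the idiom above, here is a minimal standalone sketch of the same double-checked locking pattern in portable C++11, using std::atomic in place of HotSpot's OrderAccess primitives. The Cache type and the g_cache / g_cache_alloc_lock names are hypothetical stand-ins for OopMapCache, _oop_map_cache, and OopMapCacheAlloc_lock; this is an illustration of the memory-ordering contract, not the HotSpot implementation itself.

  #include <atomic>
  #include <mutex>

  struct Cache { /* hypothetical stand-in for OopMapCache */ };

  std::atomic<Cache*> g_cache{nullptr};
  std::mutex          g_cache_alloc_lock;

  Cache* get_or_create_cache() {
    // The acquire load pairs with the release store below, so a reader
    // that observes a non-null pointer also observes the fully
    // constructed Cache it points to.
    Cache* c = g_cache.load(std::memory_order_acquire);
    if (c == nullptr) {
      std::lock_guard<std::mutex> guard(g_cache_alloc_lock);
      // Re-check under the lock: another thread may have won the race
      // while we were waiting. Relaxed is sufficient here because the
      // mutex already orders this load against the publishing store.
      c = g_cache.load(std::memory_order_relaxed);
      if (c == nullptr) {
        c = new Cache();
        // Publish only after construction completes; release ensures
        // no lock-free reader can see a partially built Cache.
        g_cache.store(c, std::memory_order_release);
      }
    }
    return c;
  }

The release/acquire pairing is what the patched code spells OrderAccess::release_store / OrderAccess::load_acquire; the point of this revision is that the _ptr-suffixed variants are no longer needed once the accessors are templated over the field type.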
*** 1670,1680 ****
  // We use a double-check locking idiom here because this cache is
  // performance sensitive. In the normal system, this cache only
  // transitions from NULL to non-NULL which is safe because we use
  // release_set_methods_jmethod_ids() to advertise the new cache.
  // A partially constructed cache should never be seen by a racing
! // thread. We also use release_store_ptr() to save a new jmethodID
  // in the cache so a partially constructed jmethodID should never be
  // seen either. Cache reads of existing jmethodIDs proceed without a
  // lock, but cache writes of a new jmethodID requires uniqueness and
  // creation of the cache itself requires no leaks so a lock is
  // generally acquired in those two cases.
--- 1669,1679 ----
  // We use a double-check locking idiom here because this cache is
  // performance sensitive. In the normal system, this cache only
  // transitions from NULL to non-NULL which is safe because we use
  // release_set_methods_jmethod_ids() to advertise the new cache.
  // A partially constructed cache should never be seen by a racing
! // thread. We also use release_store() to save a new jmethodID
  // in the cache so a partially constructed jmethodID should never be
  // seen either. Cache reads of existing jmethodIDs proceed without a
  // lock, but cache writes of a new jmethodID requires uniqueness and
  // creation of the cache itself requires no leaks so a lock is
  // generally acquired in those two cases.
*** 1829,1839 ****
        id = new_id;
  
        // The jmethodID cache can be read while unlocked so we have to
        // make sure the new jmethodID is complete before installing it
        // in the cache.
!       OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
      } else {
        *to_dealloc_id_p = new_id; // save new id for later delete
      }
      return id;
    }
--- 1828,1838 ----
        id = new_id;
  
        // The jmethodID cache can be read while unlocked so we have to
        // make sure the new jmethodID is complete before installing it
        // in the cache.
!       OrderAccess::release_store(&jmeths[idnum+1], id);
      } else {
        *to_dealloc_id_p = new_id; // save new id for later delete
      }
      return id;
    }
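The hunk above is the writer side of the lock-free cache described in the comment at line 1670: the writer installs a fully constructed jmethodID with a release store while holding the uniqueness lock, and unlocked readers retrieve it with an acquire load. A minimal sketch of that pairing, again in portable C++11 with std::atomic standing in for OrderAccess, and with MethodID, jmeths, publish_id, and lookup_id as hypothetical names:

  #include <atomic>
  #include <cstddef>

  struct MethodID { /* hypothetical stand-in for the jmethodID data */ };

  // Each slot is atomic so that unlocked reads are well defined; the
  // real cache reserves slot 0 for the cache length, hence idnum+1.
  std::atomic<MethodID*> jmeths[16];

  // Writer path (caller holds the uniqueness lock, as in the HotSpot
  // code): release ensures the MethodID is complete before readers
  // can observe the pointer.
  void publish_id(std::size_t idnum, MethodID* id) {
    jmeths[idnum + 1].store(id, std::memory_order_release);
  }

  // Reader path (no lock): either sees nullptr or a fully
  // constructed MethodID, never a partially built one.
  MethodID* lookup_id(std::size_t idnum) {
    return jmeths[idnum + 1].load(std::memory_order_acquire);
  }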