// NOTE(review): this span is a side-by-side diff excerpt (old | new), not
// compilable source; the leading numbers (241-263) are the original file's
// line numbers. Both halves show the same two fragments:
//   1) The tail of an exclusive-lock acquisition (function header is outside
//      this view, so the fragment is incomplete here): a CAS retry loop adds
//      _MAGIC_ to *_lock so the counter goes negative and blocks new shared
//      holders, then spin-waits (os::naked_short_sleep(1) on Windows,
//      os::naked_yield() elsewhere) until *_lock == _MAGIC_, i.e. every
//      in-flight reader has released; finally records ExclusiveLock state.
//      The entry asserts require that the lock is not already held
//      exclusively and that no writer has raced us (*_lock >= 0).
//   2) MallocSiteHashtableEntry::atomic_insert: CAS of `entry` into _next,
//      succeeding only if _next == NULL; returns true iff THIS call
//      performed the insert (lock-free append to a hash-bucket chain).
// The only old -> new difference is in atomic_insert: the `const` qualifier
// on the `entry` parameter — and the two matching casts fed to
// Atomic::cmpxchg — were dropped, simplifying the call to
// Atomic::cmpxchg(entry, &_next, (MallocSiteHashtableEntry*)NULL).
// Presumably HotSpot NMT malloc-site-table code (mallocSiteTable) — TODO
// confirm against the upstream OpenJDK sources before relying on this.
241 assert(_lock_state != ExclusiveLock, "Can only call once"); 242 assert(*_lock >= 0, "Can not content exclusive lock"); 243 244 // make counter negative to block out shared locks 245 do { 246 val = *_lock; 247 target = _MAGIC_ + *_lock; 248 } while (Atomic::cmpxchg(target, _lock, val) != val); 249 250 // wait for all readers to exit 251 while (*_lock != _MAGIC_) { 252 #ifdef _WINDOWS 253 os::naked_short_sleep(1); 254 #else 255 os::naked_yield(); 256 #endif 257 } 258 _lock_state = ExclusiveLock; 259 } 260 261 bool MallocSiteHashtableEntry::atomic_insert(const MallocSiteHashtableEntry* entry) { 262 return Atomic::cmpxchg(entry, (const MallocSiteHashtableEntry**)&_next, (const MallocSiteHashtableEntry*)NULL) == NULL; 263 } | 241 assert(_lock_state != ExclusiveLock, "Can only call once"); 242 assert(*_lock >= 0, "Can not content exclusive lock"); 243 244 // make counter negative to block out shared locks 245 do { 246 val = *_lock; 247 target = _MAGIC_ + *_lock; 248 } while (Atomic::cmpxchg(target, _lock, val) != val); 249 250 // wait for all readers to exit 251 while (*_lock != _MAGIC_) { 252 #ifdef _WINDOWS 253 os::naked_short_sleep(1); 254 #else 255 os::naked_yield(); 256 #endif 257 } 258 _lock_state = ExclusiveLock; 259 } 260 261 bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) { 262 return Atomic::cmpxchg(entry, &_next, (MallocSiteHashtableEntry*)NULL) == NULL; 263 } |