
src/hotspot/share/services/mallocSiteTable.cpp

 130  *  This method should not return NULL under normal circumstances.
 131  *  If NULL is returned, it indicates:
 132  *    1. Out of memory: a new hash entry could not be allocated.
 133  *    2. Hash bucket overflow.
 134  *  In either case, the caller should handle the situation.
 135  */
 136 MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
 137   size_t* pos_idx, MEMFLAGS flags) {
 138   assert(flags != mtNone, "Should have a real memory type");
 139   unsigned int index = hash_to_index(key.hash());
 140   *bucket_idx = (size_t)index;
 141   *pos_idx = 0;
 142 
 143   // First entry for this hash bucket
 144   if (_table[index] == NULL) {
 145     MallocSiteHashtableEntry* entry = new_entry(key, flags);
 146     // OOM check
 147     if (entry == NULL) return NULL;
 148 
 149     // swap in the head
 150     if (Atomic::cmpxchg_ptr((void*)entry, (volatile void *)&_table[index], NULL) == NULL) {
 151       return entry->data();
 152     }
 153 
 154     delete entry;
 155   }
 156 
 157   MallocSiteHashtableEntry* head = _table[index];
 158   while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) {
 159     MallocSite* site = head->data();
 160     if (site->flags() == flags && site->equals(key)) {
 161       return head->data();
 162     }
 163 
 164     if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) {
 165       MallocSiteHashtableEntry* entry = new_entry(key, flags);
 166       // OOM check
 167       if (entry == NULL) return NULL;
 168       if (head->atomic_insert(entry)) {
 169         (*pos_idx) ++;
 170         return entry->data();


 239   jint val;
 240 
 241   assert(_lock_state != ExclusiveLock, "Can only call once");
 242   assert(*_lock >= 0, "Cannot contend for exclusive lock");
 243 
 244   // make counter negative to block out shared locks
 245   do {
 246     val = *_lock;
 247     target = _MAGIC_ + *_lock;
 248   } while (Atomic::cmpxchg(target, _lock, val) != val);
 249 
 250   // wait for all readers to exit
 251   while (*_lock != _MAGIC_) {
 252 #ifdef _WINDOWS
 253     os::naked_short_sleep(1);
 254 #else
 255     os::naked_yield();
 256 #endif
 257   }
 258   _lock_state = ExclusiveLock;
 259 }


 130  *  This method should not return NULL under normal circumstances.
 131  *  If NULL is returned, it indicates:
 132  *    1. Out of memory: a new hash entry could not be allocated.
 133  *    2. Hash bucket overflow.
 134  *  In either case, the caller should handle the situation.
 135  */
 136 MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
 137   size_t* pos_idx, MEMFLAGS flags) {
 138   assert(flags != mtNone, "Should have a real memory type");
 139   unsigned int index = hash_to_index(key.hash());
 140   *bucket_idx = (size_t)index;
 141   *pos_idx = 0;
 142 
 143   // First entry for this hash bucket
 144   if (_table[index] == NULL) {
 145     MallocSiteHashtableEntry* entry = new_entry(key, flags);
 146     // OOM check
 147     if (entry == NULL) return NULL;
 148 
 149     // swap in the head
 150     if (Atomic::cmpxchg(entry, &_table[index], (MallocSiteHashtableEntry*)NULL) == NULL) {
 151       return entry->data();
 152     }
 153 
 154     delete entry;
 155   }
 156 
 157   MallocSiteHashtableEntry* head = _table[index];
 158   while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) {
 159     MallocSite* site = head->data();
 160     if (site->flags() == flags && site->equals(key)) {
 161       return head->data();
 162     }
 163 
 164     if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) {
 165       MallocSiteHashtableEntry* entry = new_entry(key, flags);
 166       // OOM check
 167       if (entry == NULL) return NULL;
 168       if (head->atomic_insert(entry)) {
 169         (*pos_idx) ++;
 170         return entry->data();
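In this updated version of lookup_or_add(), the untyped Atomic::cmpxchg_ptr call at line 150 is replaced by the templated Atomic::cmpxchg, so the compiler checks that the exchange and compare values match the type of _table[index] and the casts to void* disappear. The bucket-head installation itself is unchanged: build the entry, try to CAS it into the empty slot, and fall back to the list walk (deleting the speculative entry) if another thread won the race. A minimal standalone sketch of that pattern, written against std::atomic rather than HotSpot's Atomic class (the struct and function names here are illustrative, not from the source):

    #include <atomic>

    struct Entry { Entry* next = nullptr; };

    // Try to install 'entry' as the head of an empty bucket.
    // Returns true if this thread's entry became the head; false if another
    // thread installed a head first, in which case the caller must walk the
    // chain (and dispose of its speculative entry), as lookup_or_add() does.
    bool install_head(std::atomic<Entry*>& bucket, Entry* entry) {
      Entry* expected = nullptr;   // succeed only if the bucket is still empty
      return bucket.compare_exchange_strong(expected, entry);
    }
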


 239   jint val;
 240 
 241   assert(_lock_state != ExclusiveLock, "Can only call once");
 242   assert(*_lock >= 0, "Cannot contend for exclusive lock");
 243 
 244   // make counter negative to block out shared locks
 245   do {
 246     val = *_lock;
 247     target = _MAGIC_ + *_lock;
 248   } while (Atomic::cmpxchg(target, _lock, val) != val);
 249 
 250   // wait for all readers to exit
 251   while (*_lock != _MAGIC_) {
 252 #ifdef _WINDOWS
 253     os::naked_short_sleep(1);
 254 #else
 255     os::naked_yield();
 256 #endif
 257   }
 258   _lock_state = ExclusiveLock;
 259 }
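
The exclusive-lock path above relies on _MAGIC_ being a large negative constant: adding it to the shared-lock counter drives the counter negative, which (per the comment) blocks out new shared lockers, and the writer then spins until the readers still inside have decremented the counter back down to exactly _MAGIC_. A standalone sketch of that idea with std::atomic (the MAGIC value and the names below are assumptions for illustration, not taken from the HotSpot sources):

    #include <atomic>
    #include <thread>

    static const int MAGIC = -0x40000000;   // assumed: large negative sentinel

    // Writer side: shift the reader count into negative territory so new
    // readers back off, then wait until every reader already inside has
    // left, i.e. the counter has dropped back to exactly MAGIC.
    void exclusive_lock(std::atomic<int>& lock) {
      int val = lock.load();
      while (!lock.compare_exchange_weak(val, val + MAGIC)) {
        // 'val' was refreshed with the current counter value; retry the CAS
      }
      while (lock.load() != MAGIC) {
        std::this_thread::yield();          // a reader is still inside
      }
    }
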
 260 
 261 bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
 262   return Atomic::cmpxchg(entry, &_next, (MallocSiteHashtableEntry*)NULL) == NULL;
 263 }
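
atomic_insert() applies the same compare-and-swap pattern to the entry's _next field, now also through the templated Atomic::cmpxchg: the new entry is linked in only if nothing has been appended at that position in the meantime. A comparable standalone sketch using std::atomic (names are illustrative, not from the source):

    #include <atomic>

    struct Node { std::atomic<Node*> next{nullptr}; };

    // Link 'entry' after 'tail' only if its next pointer is still unset;
    // on failure the caller re-examines the chain, as lookup_or_add() does.
    bool atomic_insert_after(Node* tail, Node* entry) {
      Node* expected = nullptr;
      return tail->next.compare_exchange_strong(expected, entry);
    }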