src/share/vm/services/mallocSiteTable.cpp

 165       MallocSiteHashtableEntry* entry = new_entry(key, flags);
 166       // OOM check
 167       if (entry == NULL) return NULL;
 168       if (head->atomic_insert(entry)) {
 169         (*pos_idx) ++;
 170         return entry->data();
 171       }
 172       // contended, other thread won
 173       delete entry;
 174     }
 175     head = (MallocSiteHashtableEntry*)head->next();
 176     (*pos_idx) ++;
 177   }
 178   return NULL;
 179 }
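
The loop above is a lock-free chain insertion: a freshly allocated entry is linked in with a compare-and-swap on the predecessor's next pointer, and when another thread wins the race the local candidate is discarded and the walk continues. Below is a minimal standalone sketch of that pattern using std::atomic rather than HotSpot's Atomic class; the type and function names are illustrative, not the MallocSiteTable API.

#include <atomic>
#include <new>

struct Node {
  int                value;
  std::atomic<Node*> next{nullptr};
  explicit Node(int v) : value(v) {}

  // Mirrors atomic_insert(): succeeds only if no other thread has already
  // linked an entry after this node.
  bool atomic_insert(Node* entry) {
    Node* expected = nullptr;
    return next.compare_exchange_strong(expected, entry);
  }
};

// Append a new node to the chain starting at 'head'; returns the linked node,
// or NULL on allocation failure (the OOM check in the table code above).
Node* chain_insert(Node* head, int value) {
  Node* entry = new (std::nothrow) Node(value);
  if (entry == nullptr) return nullptr;
  Node* cur = head;
  while (true) {
    if (cur->atomic_insert(entry)) {
      return entry;                              // this thread won the race
    }
    // Contended: another thread linked a node first. The table code deletes
    // its candidate and re-examines the winner (it may hold the same key);
    // this simpler append just walks past the winner and retries.
    cur = cur->next.load(std::memory_order_acquire);
  }
}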
 180 
 181 // Access malloc site
 182 MallocSite* MallocSiteTable::malloc_site(size_t bucket_idx, size_t pos_idx) {
 183   assert(bucket_idx < table_size, "Invalid bucket index");
 184   MallocSiteHashtableEntry* head = _table[bucket_idx];
 185   for (size_t index = 0; index < pos_idx && head != NULL;
 186     index ++, head = (MallocSiteHashtableEntry*)head->next());
 187   assert(head != NULL, "Invalid position index");
 188   return head->data();
 189 }
 190 
 191 // Allocates MallocSiteHashtableEntry object. Special call stack
 192 // (pre-installed allocation site) has to be used to avoid infinite
 193 // recursion.
 194 MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key, MEMFLAGS flags) {
 195   void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT,
 196     *hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL);
 197   return ::new (p) MallocSiteHashtableEntry(key, flags);
 198 }
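
new_entry() above obtains raw storage that may legitimately come back NULL (AllocFailStrategy::RETURN_NULL) and then constructs the entry in place with placement new, relying on the caller's OOM check. Below is a minimal sketch of that allocate-then-construct pattern; here the NULL check is done before construction, since placement new must not be handed a NULL address. raw_alloc() is a hypothetical stand-in for the nothrow AllocateHeap call, and Entry is illustrative.

#include <cstdlib>
#include <new>

struct Entry {
  int key;
  explicit Entry(int k) : key(k) {}
};

// Stand-in for AllocateHeap(..., AllocFailStrategy::RETURN_NULL): returns
// NULL on failure instead of throwing or aborting.
static void* raw_alloc(std::size_t size) {
  return std::malloc(size);
}

static Entry* new_entry(int key) {
  void* p = raw_alloc(sizeof(Entry));
  if (p == nullptr) return nullptr;   // propagate OOM to the caller
  return ::new (p) Entry(key);        // construct the object in place
}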
 199 
 200 void MallocSiteTable::reset() {
 201   for (int index = 0; index < table_size; index ++) {
 202     MallocSiteHashtableEntry* head = _table[index];
 203     _table[index] = NULL;
 204     delete_linked_list(head);
 205   }
 206 }
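
reset() above hands each chain to delete_linked_list(), whose body is not part of this excerpt. The sketch below shows what such a helper has to do under the assumption of straightforward node-by-node deletion: the successor link must be read before the node is freed, because the node cannot be touched after 'delete'. ListNode is illustrative.

struct ListNode {
  ListNode* next;
};

static void delete_linked_list(ListNode* head) {
  while (head != nullptr) {
    ListNode* next = head->next;  // capture the link first
    delete head;                  // then release the node
    head = next;
  }
}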


 239 
 240   assert(_lock_state != ExclusiveLock, "Can only call once");
 241   assert(*_lock >= 0, "Can not content exclusive lock");
 242 
 243   // make counter negative to block out shared locks
 244   do {
 245     val = *_lock;
 246     target = _MAGIC_ + *_lock;
 247   } while (Atomic::cmpxchg(target, _lock, val) != val);
 248 
 249   // wait for all readers to exit
 250   while (*_lock != _MAGIC_) {
 251 #ifdef _WINDOWS
 252     os::naked_short_sleep(1);
 253 #else
 254     os::naked_yield();
 255 #endif
 256   }
 257   _lock_state = ExclusiveLock;
 258 }
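
The function above takes the exclusive side of a reader/writer scheme built on a single integer: the writer CAS-es a large negative constant (_MAGIC_) onto the reader count so that new shared locks, which require a non-negative value, are refused, then spins until the count falls back to exactly _MAGIC_, meaning every reader has exited. Below is a standalone sketch of that protocol using std::atomic and std::this_thread::yield() in place of HotSpot's Atomic and os wrappers. The shared-lock side is not shown in the excerpt, so the reader functions here are an assumption about how such a counter is typically used, and WRITER_BIAS is an illustrative stand-in for _MAGIC_.

#include <atomic>
#include <thread>

static const int WRITER_BIAS = -0x40000000;   // illustrative _MAGIC_ value
static std::atomic<int> lock_word{0};         // >= 0: number of active readers

// Assumed reader side: enter only while the counter is non-negative.
bool try_shared_lock() {
  int v = lock_word.load();
  return v >= 0 && lock_word.compare_exchange_strong(v, v + 1);
}

void shared_unlock() {
  lock_word.fetch_sub(1);
}

void exclusive_lock() {
  int val;
  int target;
  // Make the counter negative to block out new shared locks, preserving the
  // count of readers that are already inside.
  do {
    val    = lock_word.load();
    target = WRITER_BIAS + val;
  } while (!lock_word.compare_exchange_strong(val, target));

  // Wait for the readers to exit: each one subtracts 1 on the way out, so the
  // counter converges to exactly WRITER_BIAS.
  while (lock_word.load() != WRITER_BIAS) {
    std::this_thread::yield();
  }
}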
 259 
 260 


 165       MallocSiteHashtableEntry* entry = new_entry(key, flags);
 166       // OOM check
 167       if (entry == NULL) return NULL;
 168       if (head->atomic_insert(entry)) {
 169         (*pos_idx) ++;
 170         return entry->data();
 171       }
 172       // contended, other thread won
 173       delete entry;
 174     }
 175     head = (MallocSiteHashtableEntry*)head->next();
 176     (*pos_idx) ++;
 177   }
 178   return NULL;
 179 }
 180 
 181 // Access malloc site
 182 MallocSite* MallocSiteTable::malloc_site(size_t bucket_idx, size_t pos_idx) {
 183   assert(bucket_idx < table_size, "Invalid bucket index");
 184   MallocSiteHashtableEntry* head = _table[bucket_idx];
 185   for (size_t index = 0;
 186        index < pos_idx && head != NULL;
 187        index++, head = (MallocSiteHashtableEntry*)head->next()) {}
 188   assert(head != NULL, "Invalid position index");
 189   return head->data();
 190 }
 191 
 192 // Allocates MallocSiteHashtableEntry object. Special call stack
 193 // (pre-installed allocation site) has to be used to avoid infinite
 194 // recursion.
 195 MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key, MEMFLAGS flags) {
 196   void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT,
 197     *hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL);
 198   return ::new (p) MallocSiteHashtableEntry(key, flags);
 199 }
 200 
 201 void MallocSiteTable::reset() {
 202   for (int index = 0; index < table_size; index ++) {
 203     MallocSiteHashtableEntry* head = _table[index];
 204     _table[index] = NULL;
 205     delete_linked_list(head);
 206   }
 207 }


 240 
 241   assert(_lock_state != ExclusiveLock, "Can only call once");
 242   assert(*_lock >= 0, "Can not content exclusive lock");
 243 
 244   // make counter negative to block out shared locks
 245   do {
 246     val = *_lock;
 247     target = _MAGIC_ + *_lock;
 248   } while (Atomic::cmpxchg(target, _lock, val) != val);
 249 
 250   // wait for all readers to exit
 251   while (*_lock != _MAGIC_) {
 252 #ifdef _WINDOWS
 253     os::naked_short_sleep(1);
 254 #else
 255     os::naked_yield();
 256 #endif
 257   }
 258   _lock_state = ExclusiveLock;
 259 }

