111 * This method should not return NULL under normal circumstances.
112 * If NULL is returned, it indicates one of the following:
113 * 1. Out of memory: a new hash table entry could not be allocated.
114 * 2. The hash bucket has overflowed (its chain has reached MAX_BUCKET_LENGTH).
115 * In either case, the caller should handle the failure.
116 */
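// A minimal caller-side sketch (assuming only the contract stated above; the
// identifiers below are illustrative, not the actual call site):
//
//   size_t bucket_idx, pos_idx;
//   MallocSite* site = lookup_or_add(stack, &bucket_idx, &pos_idx, flags);
//   if (site == NULL) {
//     return false;   // OOM or bucket overflow: give up on site attribution
//   }
//   // otherwise record the allocation against the returned site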
117 MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
118 size_t* pos_idx, MEMFLAGS flags) {
119 assert(flags != mtNone, "Should have a real memory type");
120 unsigned int index = hash_to_index(key.hash());
121 *bucket_idx = (size_t)index;
122 *pos_idx = 0;
123
124 // First entry for this hash bucket
125 if (_table[index] == NULL) {
126 MallocSiteHashtableEntry* entry = new_entry(key, flags);
127 // OOM check
128 if (entry == NULL) return NULL;
129
130 // swap in the head
131 if (Atomic::replace_if_null(entry, &_table[index])) {
132 return entry->data();
133 }
134
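// Another thread installed the bucket head first; discard the freshly
// allocated entry and fall through to probe the chain that now exists.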
135 delete entry;
136 }
137
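// Probe the chain for an existing site with the same call stack and memory
// type; the walk is bounded by MAX_BUCKET_LENGTH entries.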
138 MallocSiteHashtableEntry* head = _table[index];
139 while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) {
140 MallocSite* site = head->data();
141 if (site->flag() == flags && site->equals(key)) {
142 return head->data();
143 }
144
145 if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) {
146 MallocSiteHashtableEntry* entry = new_entry(key, flags);
147 // OOM check
148 if (entry == NULL) return NULL;
149 if (head->atomic_insert(entry)) {
150 (*pos_idx) ++;
151 return entry->data();
212 AccessLock locker(&_access_count);
213 if (locker.sharedLock()) {
214 NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
215 return walk(walker);
216 }
217 return false;
218 }
219
220
221 void MallocSiteTable::AccessLock::exclusiveLock() {
222 int target;
223 int val;
224
225 assert(_lock_state != ExclusiveLock, "Can only call once");
226 assert(*_lock >= 0, "Cannot contend for the exclusive lock");
227
228 // make counter negative to block out shared locks
229 do {
230 val = *_lock;
231 target = _MAGIC_ + *_lock;
232 } while (Atomic::cmpxchg(target, _lock, val) != val);
233
234 // wait for all readers to exit
235 while (*_lock != _MAGIC_) {
236 #ifdef _WINDOWS
237 os::naked_short_sleep(1);
238 #else
239 os::naked_yield();
240 #endif
241 }
242 _lock_state = ExclusiveLock;
243 }
244
245 bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
246 return Atomic::replace_if_null(entry, &_next);
247 }
|
111 * This method should not return NULL under normal circumstances.
112 * If NULL is returned, it indicates one of the following:
113 * 1. Out of memory: a new hash table entry could not be allocated.
114 * 2. The hash bucket has overflowed (its chain has reached MAX_BUCKET_LENGTH).
115 * In either case, the caller should handle the failure.
116 */
117 MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
118 size_t* pos_idx, MEMFLAGS flags) {
119 assert(flags != mtNone, "Should have a real memory type");
120 unsigned int index = hash_to_index(key.hash());
121 *bucket_idx = (size_t)index;
122 *pos_idx = 0;
123
124 // First entry for this hash bucket
125 if (_table[index] == NULL) {
126 MallocSiteHashtableEntry* entry = new_entry(key, flags);
127 // OOM check
128 if (entry == NULL) return NULL;
129
130 // swap in the head
131 if (Atomic::replace_if_null(&_table[index], entry)) {
132 return entry->data();
133 }
134
135 delete entry;
136 }
137
138 MallocSiteHashtableEntry* head = _table[index];
139 while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) {
140 MallocSite* site = head->data();
141 if (site->flag() == flags && site->equals(key)) {
142 return head->data();
143 }
144
145 if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) {
146 MallocSiteHashtableEntry* entry = new_entry(key, flags);
147 // OOM check
148 if (entry == NULL) return NULL;
149 if (head->atomic_insert(entry)) {
150 (*pos_idx) ++;
151 return entry->data();
212 AccessLock locker(&_access_count);
213 if (locker.sharedLock()) {
214 NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
215 return walk(walker);
216 }
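// Shared lock unavailable (an exclusive locker is active or pending), so the
// walk is not performed.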
217 return false;
218 }
219
220
221 void MallocSiteTable::AccessLock::exclusiveLock() {
222 int target;
223 int val;
224
225 assert(_lock_state != ExclusiveLock, "Can only call once");
226 assert(*_lock >= 0, "Cannot contend for the exclusive lock");
227
228 // make counter negative to block out shared locks
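// The CAS shifts the counter by _MAGIC_ (a large negative value) while
// preserving the count of readers currently inside, so any new shared-lock
// attempt sees a negative counter and backs off.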
229 do {
230 val = *_lock;
231 target = _MAGIC_ + *_lock;
232 } while (Atomic::cmpxchg(_lock, val, target) != val);
233
234 // wait for all readers to exit
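// Each reader already inside still holds a +1 contribution; as readers
// release, the counter drains back to exactly _MAGIC_, at which point no
// shared lock remains.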
235 while (*_lock != _MAGIC_) {
236 #ifdef _WINDOWS
237 os::naked_short_sleep(1);
238 #else
239 os::naked_yield();
240 #endif
241 }
242 _lock_state = ExclusiveLock;
243 }
244
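// Links 'entry' behind this node only if the 'next' slot is still NULL; a
// false return means another thread appended its own entry first.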
245 bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
246 return Atomic::replace_if_null(&_next, entry);
247 }
|
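The exclusiveLock() path above depends on a counter protocol that the excerpt only shows one side of: shared lockers keep the counter at zero or above, and the exclusive locker shifts it by the negative sentinel _MAGIC_ so new readers back off while the ones already inside drain out. Below is a minimal standalone sketch of that protocol written against std::atomic rather than HotSpot's Atomic class; the sentinel value, the function names, and the shared_lock()/shared_unlock() behavior are assumptions made for illustration, not the actual MallocSiteTable::AccessLock implementation.

#include <atomic>
#include <cassert>
#include <thread>

namespace access_lock_sketch {

// Large negative sentinel; the concrete value of the real _MAGIC_ is assumed.
static const int MAGIC = -0x40000000;

// >= 0: number of shared lockers inside; < 0: exclusive locker active/pending.
static std::atomic<int> lock_count{0};

// Try to take a shared (reader) lock; fail if an exclusive locker is present.
inline bool shared_lock() {
  int v = lock_count.load();
  while (v >= 0) {
    if (lock_count.compare_exchange_weak(v, v + 1)) {
      return true;                         // reader registered
    }
    // compare_exchange_weak reloaded v; the loop re-checks its sign
  }
  return false;
}

inline void shared_unlock() {
  lock_count.fetch_sub(1);                 // drop this reader's contribution
}

// Take the exclusive (writer) lock: shift the counter by MAGIC so new readers
// back off, then wait until readers that were already inside have unlocked.
inline void exclusive_lock() {
  int v = lock_count.load();
  for (;;) {
    assert(v >= 0 && "exclusive lockers must not contend");
    if (lock_count.compare_exchange_weak(v, MAGIC + v)) {
      break;                               // counter is now MAGIC + <readers inside>
    }
  }
  while (lock_count.load() != MAGIC) {
    std::this_thread::yield();             // readers still draining
  }
}

inline void exclusive_unlock() {
  lock_count.store(0);                     // reopen for shared lockers
}

} // namespace access_lock_sketch

A reader brackets its table walk with shared_lock()/shared_unlock(); the exclusive path corresponds to AccessLock::exclusiveLock() above, whose while loop waits for the counter to drain to the sentinel before the caller may mutate the table.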