/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "services/mallocSiteTable.hpp"

// Malloc site hashtable buckets
MallocSiteHashtableEntry* MallocSiteTable::_table[MallocSiteTable::table_size];
const NativeCallStack* MallocSiteTable::_hash_entry_allocation_stack = NULL;
const MallocSiteHashtableEntry* MallocSiteTable::_hash_entry_allocation_site = NULL;

// Concurrent access counter
volatile int MallocSiteTable::_access_count = 0;

// Tracking hashtable contention
NOT_PRODUCT(int MallocSiteTable::_peak_count = 0;)

/*
 * Initialize the malloc site table.
 * Hashtable entries are malloc'd, so allocating an entry for the
 * allocation site of the entries themselves would recurse infinitely.
 * To avoid this problem, we pre-initialize a hash entry for that
 * allocation site.
 * This method runs during C runtime static variable initialization,
 * which is single-threaded from the JVM's perspective.
 */
bool MallocSiteTable::initialize() {
  assert((size_t)table_size <= MAX_MALLOCSITE_TABLE_SIZE, "Hashtable overflow");

  // Fake the call stack for hashtable entry allocation
  assert(NMT_TrackingStackDepth > 1, "At least one tracking stack frame");

  // Create a pseudo call stack for hashtable entry allocation
  address pc[3];
  if (NMT_TrackingStackDepth >= 3) {
    pc[2] = (address)MallocSiteTable::allocation_at;
  }
  if (NMT_TrackingStackDepth >= 2) {
    pc[1] = (address)MallocSiteTable::lookup_or_add;
  }
  pc[0] = (address)MallocSiteTable::new_entry;

  static const NativeCallStack stack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth)));
  static const MallocSiteHashtableEntry entry(stack, mtNMT);

  assert(_hash_entry_allocation_stack == NULL &&
         _hash_entry_allocation_site == NULL,
         "Already initialized");

  _hash_entry_allocation_stack = &stack;
  _hash_entry_allocation_site = &entry;

  // Add the allocation site to the hashtable.
  int index = hash_to_index(stack.hash());
  _table[index] = const_cast<MallocSiteHashtableEntry*>(&entry);

  return true;
}
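
// To see why the pre-installed entry matters, consider the allocation path
// without it. The sketch below is simplified and the intermediate frames are
// approximations (the actual NMT entry points live elsewhere in the runtime),
// but the shape of the recursion is the point:
//
//   os::malloc()
//     -> NMT records the allocation, which needs a MallocSite
//       -> MallocSiteTable::lookup_or_add()
//         -> MallocSiteTable::new_entry()      // the entry itself is malloc'd
//           -> AllocateHeap() -> os::malloc()  // ...and we recurse forever
//
// Because the entry for new_entry()'s own call stack is a function-local
// static constructed once in initialize(), that one site never has to be
// allocated through the table it is bootstrapping.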

// Walks entries in the hashtable.
// The walk stops if the walker returns false.
bool MallocSiteTable::walk(MallocSiteWalker* walker) {
  MallocSiteHashtableEntry* head;
  for (int index = 0; index < table_size; index ++) {
    head = _table[index];
    while (head != NULL) {
      if (!walker->do_malloc_site(head->peek())) {
        return false;
      }
      head = (MallocSiteHashtableEntry*)head->next();
    }
  }
  return true;
}

/*
 * The hashtable has no deletion policy for individual entries,
 * and each linked-list node is inserted via compare-and-swap,
 * so every linked list is stable; contention can only happen
 * at the tail of a list.
 * This method should not return NULL under normal circumstances.
 * A NULL return indicates one of:
 *  1. Out of memory: a new hash entry cannot be allocated.
 *  2. Bucket overflow: the bucket already holds MAX_BUCKET_LENGTH entries.
 * In either case, the caller must handle the failure.
 */
MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
  size_t* pos_idx, MEMFLAGS flags) {
  assert(flags != mtNone, "Should have a real memory type");
  unsigned int index = hash_to_index(key.hash());
  *bucket_idx = (size_t)index;
  *pos_idx = 0;

  // First entry for this hash bucket
  if (_table[index] == NULL) {
    MallocSiteHashtableEntry* entry = new_entry(key, flags);
    // OOM check
    if (entry == NULL) return NULL;

    // swap in the head
    if (Atomic::replace_if_null(entry, &_table[index])) {
      return entry->data();
    }

    // Another thread installed the head first; fall through and scan the list.
    delete entry;
  }

  MallocSiteHashtableEntry* head = _table[index];
  while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) {
    MallocSite* site = head->data();
    if (site->flags() == flags && site->equals(key)) {
      return head->data();
    }

    if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) {
      MallocSiteHashtableEntry* entry = new_entry(key, flags);
      // OOM check
      if (entry == NULL) return NULL;
      if (head->atomic_insert(entry)) {
        (*pos_idx) ++;
        return entry->data();
      }
      // contended; another thread won
      delete entry;
    }
    head = (MallocSiteHashtableEntry*)head->next();
    (*pos_idx) ++;
  }
  return NULL;
}

// Access malloc site
MallocSite* MallocSiteTable::malloc_site(size_t bucket_idx, size_t pos_idx) {
  assert(bucket_idx < table_size, "Invalid bucket index");
  MallocSiteHashtableEntry* head = _table[bucket_idx];
  for (size_t index = 0;
       index < pos_idx && head != NULL;
       index++, head = (MallocSiteHashtableEntry*)head->next()) {}
  assert(head != NULL, "Invalid position index");
  return head->data();
}
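
// For illustration, a minimal MallocSiteWalker might look like the sketch
// below. It is hypothetical (the real walkers live in the NMT reporting
// code) and assumes only the interface this file already relies on:
// do_malloc_site() is called once per recorded site, and returning false
// aborts the walk early.
//
//   class SiteCountWalker : public MallocSiteWalker {
//    private:
//     int _count;
//    public:
//     SiteCountWalker() : _count(0) { }
//     virtual bool do_malloc_site(const MallocSite* site) {
//       _count++;
//       return true;  // keep walking; return false to stop
//     }
//     int count() const { return _count; }
//   };
//
// Walks must go through walk_malloc_site() below, which takes the shared
// lock and fails (returns false) if the table is exclusively locked,
// e.g. during shutdown():
//
//   SiteCountWalker counter;
//   if (MallocSiteTable::walk_malloc_site(&counter)) {
//     // counter.count() now holds the number of recorded malloc sites
//   }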

// Allocates a MallocSiteHashtableEntry object. The special pre-installed
// allocation site has to be used as the entry's own call stack to avoid
// infinite recursion.
MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key, MEMFLAGS flags) {
  void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT,
                         *hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL);
  // If AllocateHeap returned NULL, the (non-throwing) placement new-expression
  // yields NULL without invoking the constructor, so callers' OOM checks work.
  return ::new (p) MallocSiteHashtableEntry(key, flags);
}

void MallocSiteTable::reset() {
  for (int index = 0; index < table_size; index ++) {
    MallocSiteHashtableEntry* head = _table[index];
    _table[index] = NULL;
    delete_linked_list(head);
  }

  _hash_entry_allocation_stack = NULL;
  _hash_entry_allocation_site = NULL;
}

void MallocSiteTable::delete_linked_list(MallocSiteHashtableEntry* head) {
  MallocSiteHashtableEntry* p;
  while (head != NULL) {
    p = head;
    head = (MallocSiteHashtableEntry*)head->next();
    // The pre-installed bootstrap entry is a C++ static; never delete it.
    if (p != hash_entry_allocation_site()) {
      delete p;
    }
  }
}

void MallocSiteTable::shutdown() {
  AccessLock locker(&_access_count);
  locker.exclusiveLock();
  reset();
}

bool MallocSiteTable::walk_malloc_site(MallocSiteWalker* walker) {
  assert(walker != NULL, "NULL walker");
  AccessLock locker(&_access_count);
  if (locker.sharedLock()) {
    NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
    return walk(walker);
  }
  return false;
}

void MallocSiteTable::AccessLock::exclusiveLock() {
  int target;
  int val;

  assert(_lock_state != ExclusiveLock, "Can only call once");
  assert(*_lock >= 0, "Cannot contend exclusive lock");

  // Make the counter negative to block out shared locks. Compute the target
  // from the snapshot in val, not from a second read of *_lock, so the value
  // we install always matches the value the cmpxchg compared against.
  do {
    val = *_lock;
    target = _MAGIC_ + val;
  } while (Atomic::cmpxchg(target, _lock, val) != val);

  // Wait for all readers to exit.
  while (*_lock != _MAGIC_) {
#ifdef _WINDOWS
    os::naked_short_sleep(1);
#else
    os::naked_yield();
#endif
  }
  _lock_state = ExclusiveLock;
}

bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
  return Atomic::replace_if_null(entry, &_next);
}
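
// For reference, the shared-lock half of this protocol lives in
// mallocSiteTable.hpp. A simplified sketch of its logic, matching the
// exclusiveLock() above (see the header for the authoritative code):
//
//   bool MallocSiteTable::AccessLock::sharedLock() {
//     if (Atomic::add(1, _lock) < 0) {
//       // An exclusive locker has swung the counter negative by adding
//       // _MAGIC_; back out our increment and fail.
//       Atomic::add(-1, _lock);
//       return false;
//     }
//     _lock_state = SharedLock;
//     return true;
//   }
//
// exclusiveLock() then spins until the counter drains back to exactly
// _MAGIC_, i.e. until every reader that entered before the swap has
// released its shared lock.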