/*
 * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
#define SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/allocationSite.hpp"
#include "services/mallocTracker.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"

// MallocSite represents a code path that eventually calls
// os::malloc() to allocate memory.
class MallocSite : public AllocationSite<MemoryCounter> {
 private:
  MEMFLAGS _flags;

 public:
  MallocSite() :
    AllocationSite<MemoryCounter>(NativeCallStack::EMPTY_STACK), _flags(mtNone) {}

  MallocSite(const NativeCallStack& stack, MEMFLAGS flags) :
    AllocationSite<MemoryCounter>(stack), _flags(flags) {}

  void allocate(size_t size)   { data()->allocate(size);   }
  void deallocate(size_t size) { data()->deallocate(size); }

  // Memory allocated from this code path
  size_t size() const { return peek()->size(); }
  // The number of calls made from this code path
  size_t count() const { return peek()->count(); }
  MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
};

// Malloc site hashtable entry
class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
 private:
  MallocSite                _malloc_site;
  MallocSiteHashtableEntry* _next;

 public:
  MallocSiteHashtableEntry() : _next(NULL) { }

  MallocSiteHashtableEntry(const NativeCallStack& stack, MEMFLAGS flags):
    _malloc_site(stack, flags), _next(NULL) {
    assert(flags != mtNone, "Expect a real memory type");
  }

  inline const MallocSiteHashtableEntry* next() const {
    return _next;
  }

  // Insert an entry atomically.
  // Returns true if the entry was inserted successfully.
  // The operation can fail due to contention from other threads.
  bool atomic_insert(const MallocSiteHashtableEntry* entry);
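
  // A possible lock-free implementation (a sketch only; the real definition
  // lives in the corresponding .cpp file): publish the entry with a
  // compare-and-swap, linking it into _next only if _next is still NULL.
  // This assumes a templated Atomic::cmpxchg(new_value, dest, compare_value):
  //
  //   bool MallocSiteHashtableEntry::atomic_insert(const MallocSiteHashtableEntry* entry) {
  //     MallocSiteHashtableEntry* e = const_cast<MallocSiteHashtableEntry*>(entry);
  //     return Atomic::cmpxchg(e, &_next, (MallocSiteHashtableEntry*)NULL) == NULL;
  //   }
  //
  // If the CAS observes a non-NULL _next, another thread linked its entry
  // first and the caller must retry further down the chain.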

  void set_callsite(const MallocSite& site) {
    _malloc_site = site;
  }

  inline const MallocSite* peek() const { return &_malloc_site; }
  inline MallocSite* data() { return &_malloc_site; }

  inline long hash() const { return _malloc_site.hash(); }
  inline bool equals(const NativeCallStack& stack) const {
    return _malloc_site.equals(stack);
  }
  // Allocation/deallocation on this allocation site
  inline void allocate(size_t size)   { _malloc_site.allocate(size); }
  inline void deallocate(size_t size) { _malloc_site.deallocate(size); }
  // Memory counters
  inline size_t size() const { return _malloc_site.size(); }
  inline size_t count() const { return _malloc_site.count(); }
};

// The walker visits every entry in the MallocSiteTable
class MallocSiteWalker : public StackObj {
 public:
  virtual bool do_malloc_site(const MallocSite* e) { return false; }
};
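
// For illustration, a hypothetical walker (TotalSizeWalker is not part of
// this file) that sums outstanding allocation sizes over all recorded sites
// could look like this:
//
//   class TotalSizeWalker : public MallocSiteWalker {
//    private:
//     size_t _total;
//    public:
//     TotalSizeWalker() : _total(0) { }
//     virtual bool do_malloc_site(const MallocSite* e) {
//       _total += e->size();
//       return true;  // true means keep walking
//     }
//     size_t total() const { return _total; }
//   };
//
// An instance is passed to MallocSiteTable::walk_malloc_site() below;
// returning false from do_malloc_site() aborts the walk.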

/*
 * Native memory tracking call site table.
 * The table is only needed when detail tracking is enabled.
 */
class MallocSiteTable : AllStatic {
 private:
  // The number of hash buckets in this hashtable. The number should
  // be tuned if malloc activity changes significantly.
  // The statistics can be obtained via jcmd:
  //   jcmd <pid> VM.native_memory statistics

  // Currently, the (number of buckets / number of entries) ratio is
  // about 1 : 6
  enum {
    table_base_size = 128,  // The base size is calculated from statistics to give
                            // a table ratio of around 1:6
    table_size = (table_base_size * NMT_TrackingStackDepth - 1)
  };
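
  // For example, assuming NMT_TrackingStackDepth is 4 (its value in
  // nmtCommon.hpp at the time of writing), table_size works out to
  // 128 * 4 - 1 = 511. Subtracting one yields an odd bucket count, which
  // tends to spread hash values more evenly than a power of two would.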

  // This is a very special lock that allows multiple shared accesses (sharedLock),
  // but once exclusive access (exclusiveLock) is requested, all shared accesses are
  // rejected forever.
  class AccessLock : public StackObj {
    enum LockState {
      NoLock,
      SharedLock,
      ExclusiveLock
    };

   private:
    // A very large negative number. The only way to "overflow"
    // this number is to have more than -min_jint threads in
    // this process, which is not going to happen in the foreseeable future.
    const static int _MAGIC_ = min_jint;

    LockState     _lock_state;
    volatile int* _lock;
   public:
    AccessLock(volatile int* lock) :
      _lock_state(NoLock), _lock(lock) {
    }

    ~AccessLock() {
      if (_lock_state == SharedLock) {
        Atomic::dec((volatile jint*)_lock);
      }
    }
    // Acquire shared lock.
    // Return true if shared access is granted.
    inline bool sharedLock() {
      jint res = Atomic::add(1, _lock);
      if (res < 0) {
        Atomic::add(-1, _lock);
        return false;
      }
      _lock_state = SharedLock;
      return true;
    }
    // Acquire exclusive lock
    void exclusiveLock();
  };
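
  // exclusiveLock() is defined in the corresponding .cpp file. A sketch of
  // the scheme (details may differ from the actual implementation): CAS
  // (_MAGIC_ + current) into the counter, making it negative so that every
  // subsequent sharedLock() attempt is rejected, then spin until the
  // remaining shared holders have decremented out:
  //
  //   void MallocSiteTable::AccessLock::exclusiveLock() {
  //     jint val;
  //     do {
  //       val = *_lock;
  //     } while (Atomic::cmpxchg(_MAGIC_ + val, _lock, val) != val);
  //     while (*_lock != _MAGIC_) {
  //       // Wait for all readers to exit before taking ownership.
  //     }
  //     _lock_state = ExclusiveLock;
  //   }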

 public:
  static bool initialize();
  static void shutdown();

  NOT_PRODUCT(static int access_peak_count() { return _peak_count; })

  // Number of hash buckets
  static inline int hash_buckets() { return (int)table_size; }

  // Access and copy a call stack from this table. The shared lock must be
  // acquired before accessing the entry.
  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
    size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        stack = *site->call_stack();
        return true;
      }
    }
    return false;
  }

  // Record a new allocation from the specified call path.
  // Return true if the allocation was recorded successfully; bucket_idx
  // and pos_idx are updated to indicate the entry where the allocation
  // information was recorded.
  // False is returned only under rare scenarios:
  //   1. out of memory
  //   2. overflow of a hash bucket
  static inline bool allocation_at(const NativeCallStack& stack, size_t size,
    size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx, flags);
      if (site != NULL) site->allocate(size);
      return site != NULL;
    }
    return false;
  }

  // Record a memory deallocation. bucket_idx and pos_idx indicate where the
  // allocation information was recorded.
  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        site->deallocate(size);
        return true;
      }
    }
    return false;
  }
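
  // Typical usage (a sketch; in HotSpot the indices are stashed in the
  // malloc block's header by the malloc tracker): record the site at
  // allocation time, remember (bucket_idx, pos_idx) alongside the block,
  // and replay them at free time so the same site's counters shrink.
  //
  //   size_t bucket_idx, pos_idx;
  //   if (MallocSiteTable::allocation_at(stack, size, &bucket_idx, &pos_idx, flags)) {
  //     // ... store bucket_idx/pos_idx with the block; later, on free:
  //     MallocSiteTable::deallocation_at(size, bucket_idx, pos_idx);
  //   }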

  // Walk this table.
  static bool walk_malloc_site(MallocSiteWalker* walker);

 private:
  static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key, MEMFLAGS flags);
  static void reset();

  // Delete a bucket linked list
  static void delete_linked_list(MallocSiteHashtableEntry* head);

  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags);
  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
  static bool walk(MallocSiteWalker* walker);

  static inline unsigned int hash_to_index(unsigned int hash) {
    return (hash % table_size);
  }

  static inline const NativeCallStack* hash_entry_allocation_stack() {
    return (NativeCallStack*)_hash_entry_allocation_stack;
  }

 private:
  // Counter of concurrent accesses
  static volatile int _access_count;

  // The callsite hashtable. It has to be a static table,
  // since malloc calls can come from the C runtime linker.
  static MallocSiteHashtableEntry* _table[table_size];

  // Reserve enough memory for placing the following objects.

  // The memory for the hashtable entry allocation stack object
  static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
  // The memory for the hashtable entry allocation callsite object
  static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
  NOT_PRODUCT(static int _peak_count;)
};

#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
--- EOF ---