1 /*
2 * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24 #include "precompiled.hpp"
25
26
27 #include "memory/allocation.inline.hpp"
28 #include "runtime/atomic.hpp"
29 #include "services/mallocSiteTable.hpp"
30
31 /*
32 * Early os::malloc() calls come from initializations of static variables, long before entering any
33 * VM code. Upon the arrival of the first os::malloc() call, malloc site hashtable has to be
34 * initialized, along with the allocation site for the hashtable entries.
35 * To ensure that malloc site hashtable can be initialized without triggering any additional os::malloc()
36 * call, the hashtable bucket array and hashtable entry allocation site have to be static.
 * This is not a problem for the hashtable buckets, since they form an array of
 * pointer type; the C runtime just allocates a block of memory and zeroes it.
39 * But for hashtable entry allocation site object, things get tricky. C runtime not only allocates
40 * memory for it, but also calls its constructor at some later time. If we initialize the allocation site
41 * at the first os::malloc() call, the object will be reinitialized when its constructor is called
42 * by C runtime.
 * To work around the above issue, we declare a static size_t array with the size of the CallsiteHashtableEntry,
44 * the memory is used to instantiate CallsiteHashtableEntry for the hashtable entry allocation site.
45 * Given it is a primitive type array, C runtime will do nothing other than assign the memory block for the variable,
46 * which is exactly what we want.
47 * The same trick is also applied to create NativeCallStack object for CallsiteHashtableEntry memory allocation.
48 *
49 * Note: C++ object usually aligns to particular alignment, depends on compiler implementation, we declare
50 * the memory as size_t arrays, to ensure the memory is aligned to native machine word alignment.
51 */
52
// Reserve enough memory for NativeCallStack and MallocSiteHashtableEntry objects.
// Declared as size_t arrays (not real objects) so that the C runtime only
// zero-fills them and never runs a constructor that could reinitialize them
// after the first os::malloc() call -- see the file header comment above.
size_t MallocSiteTable::_hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
size_t MallocSiteTable::_hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];

// Malloc site hashtable buckets; zero-initialized by the C runtime.
MallocSiteHashtableEntry* MallocSiteTable::_table[MallocSiteTable::table_size];

// Concurrent access counter, manipulated through AccessLock.
volatile int MallocSiteTable::_access_count = 0;

// Tracks hashtable contention (highest observed _access_count); debug builds only.
NOT_PRODUCT(int MallocSiteTable::_peak_count = 0;)
65
66
/*
 * Initialize malloc site table.
 * Hashtable entry is malloc'd, so it can cause infinite recursion.
 * To avoid above problem, we pre-initialize a hash entry for
 * this allocation site.
 * The method is called during C runtime static variable initialization
 * time, it is in single-threaded mode from JVM perspective.
 */
bool MallocSiteTable::initialize() {
  // The reserved size_t arrays must be large enough to hold the objects that
  // are placement-new'd into them below (see file header comment for why the
  // storage is declared as size_t arrays rather than as real objects).
  assert(sizeof(_hash_entry_allocation_stack) >= sizeof(NativeCallStack), "Sanity Check");
  assert(sizeof(_hash_entry_allocation_site) >= sizeof(MallocSiteHashtableEntry),
         "Sanity Check");
  assert((size_t)table_size <= MAX_MALLOCSITE_TABLE_SIZE, "Hashtable overflow");

  // Fake the call stack for hashtable entry allocation
  assert(NMT_TrackingStackDepth > 1, "At least one tracking stack");

  // Create pseudo call stack for hashtable entry allocation.
  // Only as many frames as the configured tracking depth allows are filled in;
  // the NativeCallStack constructor below is told how many slots are valid.
  address pc[3];
  if (NMT_TrackingStackDepth >= 3) {
    pc[2] = (address)MallocSiteTable::allocation_at;
  }
  if (NMT_TrackingStackDepth >= 2) {
    pc[1] = (address)MallocSiteTable::lookup_or_add;
  }
  pc[0] = (address)MallocSiteTable::new_entry;

  // Instantiate NativeCallStack object, have to use placement new operator. (see comments above)
  NativeCallStack* stack = ::new ((void*)_hash_entry_allocation_stack)
    NativeCallStack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth)));

  // Instantiate hash entry for hashtable entry allocation callsite
  MallocSiteHashtableEntry* entry = ::new ((void*)_hash_entry_allocation_site)
    MallocSiteHashtableEntry(*stack, mtNMT);

  // Add the allocation site to hashtable.
  int index = hash_to_index(stack->hash());
  _table[index] = entry;

  return true;
}
108
109 // Walks entries in the hashtable.
110 // It stops walk if the walker returns false.
111 bool MallocSiteTable::walk(MallocSiteWalker* walker) {
112 MallocSiteHashtableEntry* head;
113 for (int index = 0; index < table_size; index ++) {
114 head = _table[index];
115 while (head != NULL) {
116 if (!walker->do_malloc_site(head->peek())) {
117 return false;
118 }
119 head = (MallocSiteHashtableEntry*)head->next();
120 }
121 }
122 return true;
123 }
124
187 index++, head = (MallocSiteHashtableEntry*)head->next()) {}
188 assert(head != NULL, "Invalid position index");
189 return head->data();
190 }
191
192 // Allocates MallocSiteHashtableEntry object. Special call stack
193 // (pre-installed allocation site) has to be used to avoid infinite
194 // recursion.
195 MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key, MEMFLAGS flags) {
196 void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT,
197 *hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL);
198 return ::new (p) MallocSiteHashtableEntry(key, flags);
199 }
200
201 void MallocSiteTable::reset() {
202 for (int index = 0; index < table_size; index ++) {
203 MallocSiteHashtableEntry* head = _table[index];
204 _table[index] = NULL;
205 delete_linked_list(head);
206 }
207 }
208
209 void MallocSiteTable::delete_linked_list(MallocSiteHashtableEntry* head) {
210 MallocSiteHashtableEntry* p;
211 while (head != NULL) {
212 p = head;
213 head = (MallocSiteHashtableEntry*)head->next();
214 if (p != (MallocSiteHashtableEntry*)_hash_entry_allocation_site) {
215 delete p;
216 }
217 }
218 }
219
220 void MallocSiteTable::shutdown() {
221 AccessLock locker(&_access_count);
222 locker.exclusiveLock();
223 reset();
224 }
225
226 bool MallocSiteTable::walk_malloc_site(MallocSiteWalker* walker) {
227 assert(walker != NULL, "NuLL walker");
228 AccessLock locker(&_access_count);
229 if (locker.sharedLock()) {
230 NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
231 return walk(walker);
232 }
233 return false;
234 }
|
1 /*
2 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24 #include "precompiled.hpp"
25
26
27 #include "memory/allocation.inline.hpp"
28 #include "runtime/atomic.hpp"
29 #include "services/mallocSiteTable.hpp"
30
// Malloc site hashtable buckets; zero-initialized by the C runtime.
MallocSiteHashtableEntry* MallocSiteTable::_table[MallocSiteTable::table_size];
// Pre-built call stack and hashtable entry used for the table's own entry
// allocations; set by initialize(), cleared by reset().
const NativeCallStack* MallocSiteTable::_hash_entry_allocation_stack = NULL;
const MallocSiteHashtableEntry* MallocSiteTable::_hash_entry_allocation_site = NULL;

// Concurrent access counter, manipulated through AccessLock.
volatile int MallocSiteTable::_access_count = 0;

// Tracks hashtable contention (highest observed _access_count); debug builds only.
NOT_PRODUCT(int MallocSiteTable::_peak_count = 0;)
41
42
43 /*
44 * Initialize malloc site table.
45 * Hashtable entry is malloc'd, so it can cause infinite recursion.
46 * To avoid above problem, we pre-initialize a hash entry for
47 * this allocation site.
48 * The method is called during C runtime static variable initialization
49 * time, it is in single-threaded mode from JVM perspective.
50 */
51 bool MallocSiteTable::initialize() {
52 assert((size_t)table_size <= MAX_MALLOCSITE_TABLE_SIZE, "Hashtable overflow");
53
54 // Fake the call stack for hashtable entry allocation
55 assert(NMT_TrackingStackDepth > 1, "At least one tracking stack");
56
57 // Create pseudo call stack for hashtable entry allocation
58 address pc[3];
59 if (NMT_TrackingStackDepth >= 3) {
60 pc[2] = (address)MallocSiteTable::allocation_at;
61 }
62 if (NMT_TrackingStackDepth >= 2) {
63 pc[1] = (address)MallocSiteTable::lookup_or_add;
64 }
65 pc[0] = (address)MallocSiteTable::new_entry;
66
67 static const NativeCallStack stack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth)));
68 static const MallocSiteHashtableEntry entry(stack, mtNMT);
69
70 assert(_hash_entry_allocation_stack == NULL &&
71 _hash_entry_allocation_site == NULL,
72 "Already initailized");
73
74 _hash_entry_allocation_stack = &stack;
75 _hash_entry_allocation_site = &entry;
76
77 // Add the allocation site to hashtable.
78 int index = hash_to_index(stack.hash());
79 _table[index] = const_cast<MallocSiteHashtableEntry*>(&entry);
80
81 return true;
82 }
83
84 // Walks entries in the hashtable.
85 // It stops walk if the walker returns false.
86 bool MallocSiteTable::walk(MallocSiteWalker* walker) {
87 MallocSiteHashtableEntry* head;
88 for (int index = 0; index < table_size; index ++) {
89 head = _table[index];
90 while (head != NULL) {
91 if (!walker->do_malloc_site(head->peek())) {
92 return false;
93 }
94 head = (MallocSiteHashtableEntry*)head->next();
95 }
96 }
97 return true;
98 }
99
162 index++, head = (MallocSiteHashtableEntry*)head->next()) {}
163 assert(head != NULL, "Invalid position index");
164 return head->data();
165 }
166
167 // Allocates MallocSiteHashtableEntry object. Special call stack
168 // (pre-installed allocation site) has to be used to avoid infinite
169 // recursion.
170 MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key, MEMFLAGS flags) {
171 void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT,
172 *hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL);
173 return ::new (p) MallocSiteHashtableEntry(key, flags);
174 }
175
176 void MallocSiteTable::reset() {
177 for (int index = 0; index < table_size; index ++) {
178 MallocSiteHashtableEntry* head = _table[index];
179 _table[index] = NULL;
180 delete_linked_list(head);
181 }
182
183 _hash_entry_allocation_stack = NULL;
184 _hash_entry_allocation_site = NULL;
185 }
186
187 void MallocSiteTable::delete_linked_list(MallocSiteHashtableEntry* head) {
188 MallocSiteHashtableEntry* p;
189 while (head != NULL) {
190 p = head;
191 head = (MallocSiteHashtableEntry*)head->next();
192 if (p != hash_entry_allocation_site()) {
193 delete p;
194 }
195 }
196 }
197
198 void MallocSiteTable::shutdown() {
199 AccessLock locker(&_access_count);
200 locker.exclusiveLock();
201 reset();
202 }
203
204 bool MallocSiteTable::walk_malloc_site(MallocSiteWalker* walker) {
205 assert(walker != NULL, "NuLL walker");
206 AccessLock locker(&_access_count);
207 if (locker.sharedLock()) {
208 NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
209 return walk(walker);
210 }
211 return false;
212 }
|