1 /*
2 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "precompiled.hpp"
27 #include "logging/log.hpp"
28 #include "logging/logStream.hpp"
29 #include "memory/metaspace.hpp"
30 #include "memory/metaspace/chunkManager.hpp"
31 #include "memory/metaspace/metachunk.hpp"
32 #include "memory/metaspace/metaspaceCommon.hpp"
33 #include "memory/metaspace/virtualSpaceList.hpp"
34 #include "memory/metaspace/virtualSpaceNode.hpp"
35 #include "runtime/atomic.hpp"
36 #include "runtime/orderAccess.hpp"
37 #include "runtime/mutexLocker.hpp"
38 #include "runtime/safepoint.hpp"
39
40 namespace metaspace {
41
42
43 VirtualSpaceList::~VirtualSpaceList() {
44 VirtualSpaceListIterator iter(virtual_space_list());
45 while (iter.repeat()) {
46 VirtualSpaceNode* vsl = iter.get_next();
47 delete vsl;
48 }
49 }
50
51 void VirtualSpaceList::inc_reserved_words(size_t v) {
52 assert_lock_strong(MetaspaceExpand_lock);
53 _reserved_words = _reserved_words + v;
54 }
55 void VirtualSpaceList::dec_reserved_words(size_t v) {
56 assert_lock_strong(MetaspaceExpand_lock);
57 _reserved_words = _reserved_words - v;
58 }
59
// Assert that the metaspace-wide committed byte total has not exceeded
// MaxMetaspaceSize. Kept as a macro (not a function) so the assert message
// reports the values observed at the call site.
#define assert_committed_below_limit() \
  assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT, \
         MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
65
66 void VirtualSpaceList::inc_committed_words(size_t v) {
67 assert_lock_strong(MetaspaceExpand_lock);
68 _committed_words = _committed_words + v;
69
70 assert_committed_below_limit();
71 }
72 void VirtualSpaceList::dec_committed_words(size_t v) {
73 assert_lock_strong(MetaspaceExpand_lock);
74 _committed_words = _committed_words - v;
75
76 assert_committed_below_limit();
77 }
78
79 void VirtualSpaceList::inc_virtual_space_count() {
80 assert_lock_strong(MetaspaceExpand_lock);
81 _virtual_space_count++;
82 }
83
84 void VirtualSpaceList::dec_virtual_space_count() {
85 assert_lock_strong(MetaspaceExpand_lock);
86 _virtual_space_count--;
87 }
88
// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count (i.e. nodes holding no live chunks).
// Remove Metachunks in the node from their respective freelists
// (done by VirtualSpaceNode::purge) before the node itself is deleted.
// Caller must hold MetaspaceExpand_lock.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert_lock_strong(MetaspaceExpand_lock);
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  int num_purged_nodes = 0;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify(false);)
    // Read the successor before any potential unlink/delete below.
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
                                         ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      // Return the node's chunks to the chunk manager, adjust the list-wide
      // accounting, then release the node's memory.
      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      delete vsl;
      num_purged_nodes ++;
    } else {
      // Node survives; it becomes the predecessor for the next iteration.
      prev_vsl = vsl;
    }
  }

  // Verify list
#ifdef ASSERT
  if (num_purged_nodes > 0) {
    verify(false);
  }
#endif
}
136
137
138 // This function looks at the mmap regions in the metaspace without locking.
139 // The chunks are added with store ordering and not deleted except for at
140 // unloading time during a safepoint.
141 VirtualSpaceNode* VirtualSpaceList::find_enclosing_space(const void* ptr) {
142 // List should be stable enough to use an iterator here because removing virtual
143 // space nodes is only allowed at a safepoint.
144 if (is_within_envelope((address)ptr)) {
145 VirtualSpaceListIterator iter(virtual_space_list());
146 while (iter.repeat()) {
147 VirtualSpaceNode* vsn = iter.get_next();
148 if (vsn->contains(ptr)) {
149 return vsn;
150 }
151 }
152 }
153 return NULL;
154 }
155
156 void VirtualSpaceList::retire_current_virtual_space() {
157 assert_lock_strong(MetaspaceExpand_lock);
158
159 VirtualSpaceNode* vsn = current_virtual_space();
160
161 ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
162 Metaspace::chunk_manager_metadata();
163
164 vsn->retire(cm);
165 }
166
// Create an expandable, non-class VirtualSpaceList and eagerly create its
// first node spanning word_size words.
VirtualSpaceList::VirtualSpaceList(size_t word_size) :
  _virtual_space_list(NULL),
  _current_virtual_space(NULL),
  _is_class(false),
  _reserved_words(0),
  _committed_words(0),
  _virtual_space_count(0),
  // The envelope starts out inverted (lo = max address, hi = NULL) so the
  // first expand_envelope_to_include_node() sets both boundaries.
  _envelope_lo((address)max_uintx),
  _envelope_hi(NULL) {
  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}
179
// Create the class-space VirtualSpaceList around an already-reserved space.
// This list holds exactly one node and is never expanded (see the assert in
// create_new_virtual_space()).
VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
  _virtual_space_list(NULL),
  _current_virtual_space(NULL),
  _is_class(true),
  _reserved_words(0),
  _committed_words(0),
  _virtual_space_count(0),
  // Envelope starts inverted; fixed up when the node is added below.
  _envelope_lo((address)max_uintx),
  _envelope_hi(NULL) {
  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    expand_envelope_to_include_node(class_entry);
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(class_entry);
  }
  // NOTE(review): if initialize() fails, class_entry is leaked and the list
  // stays empty; presumably callers treat an empty class list as fatal --
  // confirm at the call site.
}
199
200 size_t VirtualSpaceList::free_bytes() {
201 return current_virtual_space()->free_words_in_vs() * BytesPerWord;
202 }
203
204 // Allocate another meta virtual space and add it to the list.
205 bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
206 assert_lock_strong(MetaspaceExpand_lock);
207
208 if (is_class()) {
209 assert(false, "We currently don't support more than one VirtualSpace for"
210 " the compressed class space. The initialization of the"
211 " CCS uses another code path and should not hit this path.");
212 return false;
213 }
214
215 if (vs_word_size == 0) {
216 assert(false, "vs_word_size should always be at least _reserve_alignment large.");
217 return false;
218 }
219
220 // Reserve the space
221 size_t vs_byte_size = vs_word_size * BytesPerWord;
222 assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
223
224 // Allocate the meta virtual space and initialize it.
225 VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
226 if (!new_entry->initialize()) {
227 delete new_entry;
228 return false;
229 } else {
230 assert(new_entry->reserved_words() == vs_word_size,
231 "Reserved memory size differs from requested memory size");
232 expand_envelope_to_include_node(new_entry);
233 // ensure lock-free iteration sees fully initialized node
234 OrderAccess::storestore();
235 link_vs(new_entry);
236 DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
237 return true;
238 }
239
240 DEBUG_ONLY(verify(false);)
241
242 }
243
// Append new_entry to the list, make it the current node and update the
// reserved/committed/node-count bookkeeping. Callers hold the expand lock and
// issue a storestore barrier beforehand (see create_new_virtual_space and the
// ReservedSpace constructor) so lock-free iterators only observe fully
// initialized nodes.
void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    // First node: it becomes the list head.
    set_virtual_space_list(new_entry);
  } else {
    // Otherwise append behind the current (last) node.
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  // Trace-log the newly linked node if the log target is active.
  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(&ls);
  }
}
265
266 bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
267 size_t min_words,
268 size_t preferred_words) {
269 size_t before = node->committed_words();
270
271 bool result = node->expand_by(min_words, preferred_words);
272
273 size_t after = node->committed_words();
274
275 // after and before can be the same if the memory was pre-committed.
276 assert(after >= before, "Inconsistency");
277 inc_committed_words(after - before);
278
279 return result;
280 }
281
// Commit at least min_words (preferably preferred_words) of additional memory
// to this list. First tries to grow the current node; if that fails, retires
// the current node and creates a new one. Both sizes must be commit-aligned.
// Returns false if MetaspaceGC policy forbids the expansion or reservation
// fails.
bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_aligned(min_words, Metaspace::commit_alignment_words());
  assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  const char* const class_or_not = (is_class() ? "class" : "non-class");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
              class_or_not);
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
              class_or_not);
    return false;
  }

  // Never commit more than policy currently allows.
  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
              class_or_not);
    return true;
  }
  log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
            class_or_not);
  // Current node is exhausted; hand its remainder to the chunk manager.
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
             "The new VirtualSpace was pre-committed, so it"
             "should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}
337
338 // Given a chunk, calculate the largest possible padding space which
339 // could be required when allocating it.
340 static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
341 const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
342 if (chunk_type != HumongousIndex) {
343 // Normal, non-humongous chunks are allocated at chunk size
344 // boundaries, so the largest padding space required would be that
345 // minus the smallest chunk size.
346 const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
347 return chunk_word_size - smallest_chunk_size;
348 } else {
349 // Humongous chunks are allocated at smallest-chunksize
350 // boundaries, so there is no padding required.
351 return 0;
352 }
353 }
354
355
356 Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
357
358 // Allocate a chunk out of the current virtual space.
359 Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);
360
361 if (next != NULL) {
362 return next;
363 }
364
365 // The expand amount is currently only determined by the requested sizes
366 // and not how much committed memory is left in the current virtual space.
367
368 // We must have enough space for the requested size and any
369 // additional reqired padding chunks.
370 const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
371
372 size_t min_word_size = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
373 size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
374 if (min_word_size >= preferred_word_size) {
375 // Can happen when humongous chunks are allocated.
376 preferred_word_size = min_word_size;
377 }
378
379 bool expanded = expand_by(min_word_size, preferred_word_size);
380 if (expanded) {
381 next = current_virtual_space()->get_chunk_vs(chunk_word_size);
382 assert(next != NULL, "The allocation was expected to succeed after the expansion");
383 }
384
385 return next;
386 }
387
388 void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
389 st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
390 _virtual_space_count, p2i(_current_virtual_space));
391 VirtualSpaceListIterator iter(virtual_space_list());
392 while (iter.repeat()) {
393 st->cr();
394 VirtualSpaceNode* node = iter.get_next();
395 node->print_on(st, scale);
396 }
397 }
398
399 void VirtualSpaceList::print_map(outputStream* st) const {
400 VirtualSpaceNode* list = virtual_space_list();
401 VirtualSpaceListIterator iter(list);
402 unsigned i = 0;
403 while (iter.repeat()) {
404 st->print_cr("Node %u:", i);
405 VirtualSpaceNode* node = iter.get_next();
406 node->print_map(st, this->is_class());
407 i ++;
408 }
409 }
410
411 // Given a node, expand range such that it includes the node.
412 void VirtualSpaceList::expand_envelope_to_include_node(const VirtualSpaceNode* node) {
413 _envelope_lo = MIN2(_envelope_lo, (address)node->low_boundary());
414 _envelope_hi = MAX2(_envelope_hi, (address)node->high_boundary());
415 }
416
417
#ifdef ASSERT
// Verify list invariants: every node lies inside the envelope, and the
// summed per-node reserved words, committed words and node count match the
// list-wide counters. With slow == true, each node is additionally verified
// in depth.
void VirtualSpaceList::verify(bool slow) {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  size_t reserved = 0;
  size_t committed = 0;
  size_t node_count = 0;
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    if (slow) {
      node->verify(true);
    }
    // Check that the node resides fully within our envelope.
    assert((address)node->low_boundary() >= _envelope_lo && (address)node->high_boundary() <= _envelope_hi,
           "Node " SIZE_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT ") outside envelope [" PTR_FORMAT ", " PTR_FORMAT ").",
           node_count, p2i(node->low_boundary()), p2i(node->high_boundary()), p2i(_envelope_lo), p2i(_envelope_hi));
    reserved += node->reserved_words();
    committed += node->committed_words();
    node_count ++;
  }
  assert(reserved == reserved_words() && committed == committed_words() && node_count == _virtual_space_count,
         "Mismatch: reserved real: " SIZE_FORMAT " expected: " SIZE_FORMAT
         ", committed real: " SIZE_FORMAT " expected: " SIZE_FORMAT
         ", node count real: " SIZE_FORMAT " expected: " SIZE_FORMAT ".",
         reserved, reserved_words(), committed, committed_words(),
         node_count, _virtual_space_count);
}
#endif // ASSERT
446
447 } // namespace metaspace
|
1 /*
2 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2018, 2020 SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "logging/log.hpp"
28 #include "memory/metaspace.hpp"
29 #include "memory/metaspace/chunkManager.hpp"
30 #include "memory/metaspace/counter.hpp"
31 #include "memory/metaspace/commitLimiter.hpp"
32 #include "memory/metaspace/counter.hpp"
33 #include "memory/metaspace/freeChunkList.hpp"
34 #include "memory/metaspace/metaspaceContext.hpp"
35 #include "memory/metaspace/virtualSpaceList.hpp"
36 #include "memory/metaspace/virtualSpaceNode.hpp"
37 #include "runtime/mutexLocker.hpp"
38
39
40 namespace metaspace {
41
// Log-message prefix used by the UL/UL2 logging macros below: identifies
// this particular list instance by address and name.
#define LOGFMT "VsList @" PTR_FORMAT " (%s)"
#define LOGFMT_ARGS p2i(this), this->_name
44
// Create a new, empty, expandable list.
// name           - human-readable name, used in logging and printing.
// commit_limiter - limiter handed to each node created for this list; nodes
//                  consult it when committing memory.
VirtualSpaceList::VirtualSpaceList(const char* name, CommitLimiter* commit_limiter)
  : _name(name),
    _first_node(NULL),
    _can_expand(true),
    _can_purge(true),
    _commit_limiter(commit_limiter),
    _reserved_words_counter(),
    _committed_words_counter()
{
}
56
// Create a new list. The list will contain one node only, which uses the given ReservedSpace.
// It will be not expandable beyond that first node, and not purgeable (the
// single node wraps memory reserved by the caller).
VirtualSpaceList::VirtualSpaceList(const char* name, ReservedSpace rs, CommitLimiter* commit_limiter)
  : _name(name),
    _first_node(NULL),
    _can_expand(false),
    _can_purge(false),
    _commit_limiter(commit_limiter),
    _reserved_words_counter(),
    _committed_words_counter()
{
  // Create the first node spanning the existing ReservedSpace. This will be the only node created
  // for this list since we cannot expand.
  VirtualSpaceNode* vsn = VirtualSpaceNode::create_node(rs, _commit_limiter,
                                                        &_reserved_words_counter, &_committed_words_counter);
  assert(vsn != NULL, "node creation failed");
  _first_node = vsn;
  _first_node->set_next(NULL);
  _nodes_counter.increment();
}
77
78 VirtualSpaceList::~VirtualSpaceList() {
79 assert_lock_strong(MetaspaceExpand_lock);
80 // Note: normally, there is no reason ever to delete a vslist since they are
81 // global objects, but for gtests it makes sense to allow this.
82 VirtualSpaceNode* vsn = _first_node;
83 VirtualSpaceNode* vsn2 = vsn;
84 while (vsn != NULL) {
85 vsn2 = vsn->next();
86 delete vsn;
87 vsn = vsn2;
88 }
89 }
90
91 // Create a new node and append it to the list. After
92 // this function, _current_node shall point to a new empty node.
93 // List must be expandable for this to work.
94 void VirtualSpaceList::create_new_node() {
95 assert(_can_expand, "List is not expandable");
96 assert_lock_strong(MetaspaceExpand_lock);
97
98 VirtualSpaceNode* vsn = VirtualSpaceNode::create_node(Settings::virtual_space_node_default_word_size(),
99 _commit_limiter,
100 &_reserved_words_counter, &_committed_words_counter);
101 assert(vsn != NULL, "node creation failed");
102 vsn->set_next(_first_node);
103 _first_node = vsn;
104 _nodes_counter.increment();
105 }
106
107 // Allocate a root chunk from this list.
108 // Note: this just returns a chunk whose memory is reserved; no memory is committed yet.
109 // Hence, before using this chunk, it must be committed.
110 // Also, no limits are checked, since no committing takes place.
111 Metachunk* VirtualSpaceList::allocate_root_chunk() {
112 assert_lock_strong(MetaspaceExpand_lock);
113
114 if (_first_node == NULL ||
115 _first_node->free_words() == 0) {
116
117 // Since all allocations from a VirtualSpaceNode happen in
118 // root-chunk-size units, and the node size must be root-chunk-size aligned,
119 // we should never have left-over space.
120 assert(_first_node == NULL ||
121 _first_node->free_words() == 0, "Sanity");
122
123 if (_can_expand) {
124 create_new_node();
125 UL2(debug, "added new node (now: %d).", num_nodes());
126 } else {
127 UL(debug, "list cannot expand.");
128 return NULL; // We cannot expand this list.
129 }
130 }
131
132 Metachunk* c = _first_node->allocate_root_chunk();
133
134 assert(c != NULL, "This should have worked");
135
136 return c;
137
138 }
139
// Attempts to purge nodes. This will remove and delete nodes which only contain free chunks.
// The free chunks are removed from the freelists before the nodes are deleted.
// Return number of purged nodes. Caller must hold MetaspaceExpand_lock.
int VirtualSpaceList::purge(FreeChunkListVector* freelists) {

  assert_lock_strong(MetaspaceExpand_lock);

  // Lists created around a caller-owned ReservedSpace are not purgeable.
  if (_can_purge == false) {
    return 0;
  }

  UL(debug, "purging.");

  VirtualSpaceNode* vsn = _first_node;
  VirtualSpaceNode* prev_vsn = NULL;
  int num = 0, num_purged = 0;
  while (vsn != NULL) {
    // Capture the successor before attempt_purge may delete the node.
    VirtualSpaceNode* next_vsn = vsn->next();
    bool purged = vsn->attempt_purge(freelists);
    if (purged) {
      // Note: from now on do not dereference vsn!
      UL2(debug, "purged node @" PTR_FORMAT ".", p2i(vsn));
      if (_first_node == vsn) {
        _first_node = next_vsn;
      }
      // Poison the stale pointer in debug builds to catch accidental use.
      DEBUG_ONLY(vsn = (VirtualSpaceNode*)((uintptr_t)(0xdeadbeef));)
      if (prev_vsn != NULL) {
        prev_vsn->set_next(next_vsn);
      }
      num_purged ++;
      _nodes_counter.decrement();
    } else {
      // Node survives; it is the predecessor for the next iteration.
      prev_vsn = vsn;
    }
    vsn = next_vsn;
    num ++;
  }

  UL2(debug, "purged %d nodes (now: %d)", num_purged, num_nodes());

  return num_purged;

}
183
184 // Print all nodes in this space list.
185 void VirtualSpaceList::print_on(outputStream* st) const {
186 MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
187
188 st->print_cr("vsl %s:", _name);
189 const VirtualSpaceNode* vsn = _first_node;
190 int n = 0;
191 while (vsn != NULL) {
192 st->print("- node #%d: ", n);
193 vsn->print_on(st);
194 vsn = vsn->next();
195 n ++;
196 }
197 st->print_cr("- total %d nodes, " SIZE_FORMAT " reserved words, " SIZE_FORMAT " committed words.",
198 n, reserved_words(), committed_words());
199 }
200
#ifdef ASSERT
// Verify list invariants while holding the expand lock: walk all nodes,
// verify each one (in depth if slow == true), and check that the summed
// per-node sizes and the node count agree with the list-wide counters.
void VirtualSpaceList::verify_locked(bool slow) const {

  assert_lock_strong(MetaspaceExpand_lock);

  assert(_name != NULL, "Sanity");

  int n = 0;

  if (_first_node != NULL) {

    size_t total_reserved_words = 0;
    size_t total_committed_words = 0;
    const VirtualSpaceNode* vsn = _first_node;
    while (vsn != NULL) {
      n ++;
      vsn->verify_locked(slow);
      total_reserved_words += vsn->word_size();
      total_committed_words += vsn->committed_words();
      vsn = vsn->next();
    }

    _nodes_counter.check(n);
    _reserved_words_counter.check(total_reserved_words);
    _committed_words_counter.check(total_committed_words);

  } else {

    // Empty list: all counters must be zero.
    _reserved_words_counter.check(0);
    _committed_words_counter.check(0);

  }
}

// Lock-taking wrapper around verify_locked().
void VirtualSpaceList::verify(bool slow) const {
  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  verify_locked(slow);
}
#endif
240
241 // Returns true if this pointer is contained in one of our nodes.
242 bool VirtualSpaceList::contains(const MetaWord* p) const {
243 const VirtualSpaceNode* vsn = _first_node;
244 while (vsn != NULL) {
245 if (vsn->contains(p)) {
246 return true;
247 }
248 vsn = vsn->next();
249 }
250 return false;
251 }
252
253 // Returns true if the vslist is not expandable and no more root chunks
254 // can be allocated.
255 bool VirtualSpaceList::is_full() const {
256 if (!_can_expand && _first_node != NULL && _first_node->free_words() == 0) {
257 return true;
258 }
259 return false;
260 }
261
262 // Convenience methods to return the global class-space chunkmanager
263 // and non-class chunkmanager, respectively.
264 VirtualSpaceList* VirtualSpaceList::vslist_class() {
265 return MetaspaceContext::context_class() == NULL ? NULL : MetaspaceContext::context_class()->vslist();
266 }
267
268 VirtualSpaceList* VirtualSpaceList::vslist_nonclass() {
269 return MetaspaceContext::context_nonclass() == NULL ? NULL : MetaspaceContext::context_nonclass()->vslist();
270 }
271
272
273
274 } // namespace metaspace
|