5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "logging/log.hpp"
28 #include "logging/logStream.hpp"
29 #include "memory/metaspace/metachunk.hpp"
30 #include "memory/metaspace.hpp"
31 #include "memory/metaspace/chunkManager.hpp"
32 #include "memory/metaspace/metaDebug.hpp"
33 #include "memory/metaspace/metaspaceCommon.hpp"
34 #include "memory/metaspace/occupancyMap.hpp"
35 #include "memory/metaspace/virtualSpaceNode.hpp"
36 #include "memory/virtualspace.hpp"
37 #include "runtime/os.hpp"
38 #include "services/memTracker.hpp"
39 #include "utilities/copy.hpp"
40 #include "utilities/debug.hpp"
41 #include "utilities/globalDefinitions.hpp"
42
43 namespace metaspace {
44
45 // Decide if large pages should be committed when the memory is reserved.
46 static bool should_commit_large_pages_when_reserving(size_t bytes) {
47 if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
48 size_t words = bytes / BytesPerWord;
49 bool is_class = false; // We never reserve large pages for the class space.
50 if (MetaspaceGC::can_expand(words, is_class) &&
51 MetaspaceGC::allowed_expansion() >= words) {
52 return true;
53 }
54 }
55
56 return false;
57 }
58
59 // bytes is the size of the associated virtual space.
60 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
61 _next(NULL), _is_class(is_class), _rs(), _top(NULL), _container_count(0), _occupancy_map(NULL) {
62 assert_is_aligned(bytes, Metaspace::reserve_alignment());
63 bool large_pages = should_commit_large_pages_when_reserving(bytes);
64 _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
65
66 if (_rs.is_reserved()) {
67 assert(_rs.base() != NULL, "Catch if we get a NULL address");
68 assert(_rs.size() != 0, "Catch if we get a 0 size");
69 assert_is_aligned(_rs.base(), Metaspace::reserve_alignment());
70 assert_is_aligned(_rs.size(), Metaspace::reserve_alignment());
71
72 MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
73 }
74 }
75
76 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
77 // When a node is purged, let's give it a thorough examination.
78 DEBUG_ONLY(verify(true);)
79 Metachunk* chunk = first_chunk();
80 Metachunk* invalid_chunk = (Metachunk*) top();
81 while (chunk < invalid_chunk ) {
82 assert(chunk->is_tagged_free(), "Should be tagged free");
83 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
84 chunk_manager->remove_chunk(chunk);
85 chunk->remove_sentinel();
86 assert(chunk->next() == NULL &&
87 chunk->prev() == NULL,
88 "Was not removed from its list");
89 chunk = (Metachunk*) next;
90 }
91 }
92
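// Prints an ASCII map of this node's chunks, four rows per printed section: row 1 marks chunk
// start positions with a dot, row 2 shows the chunk type (x=spec, s=small, m=medium, h=humongous,
// uppercase while in use), row 3 shows a non-default chunk origin as a digit, and row 4 marks
// virgin (never used) chunks with a 'v'. The output is roughly of this shape (illustrative only,
// not actual output):
//
//   0x00000007c0000000:  .       .   .
//                        MMMMMMMMssssSSSS
//                                    4
//                        v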
93 void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
94
95 if (bottom() == top()) {
96 return;
97 }
98
99 const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
100 const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
101 const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
102
103 int line_len = 100;
104 const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
105 line_len = (int)(section_len / spec_chunk_size);
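// Worked example of the computation above (illustrative numbers, not necessarily the VM's actual
// chunk constants): if spec_chunk_size were 128 words and med_chunk_size 8192 words, then
// section_len = align_up(128 * 100, 8192) = 16384 words and line_len = 16384 / 128 = 128 columns,
// i.e. each printed section covers exactly two medium chunks.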
106
107 static const int NUM_LINES = 4;
108
109 char* lines[NUM_LINES];
110 for (int i = 0; i < NUM_LINES; i ++) {
111 lines[i] = (char*)os::malloc(line_len, mtInternal);
112 }
113 int pos = 0;
114 const MetaWord* p = bottom();
115 const Metachunk* chunk = (const Metachunk*)p;
116 const MetaWord* chunk_end = p + chunk->word_size();
117 while (p < top()) {
118 if (pos == line_len) {
119 pos = 0;
120 for (int i = 0; i < NUM_LINES; i ++) {
121 st->fill_to(22);
122 st->print_raw(lines[i], line_len);
123 st->cr();
124 }
125 }
126 if (pos == 0) {
127 st->print(PTR_FORMAT ":", p2i(p));
128 }
129 if (p == chunk_end) {
130 chunk = (Metachunk*)p;
131 chunk_end = p + chunk->word_size();
132 }
133 // line 1: chunk starting points (a dot if that area is a chunk start).
134 lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
135
136 // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
137 // chunk is in use.
138 const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
139 if (chunk->word_size() == spec_chunk_size) {
140 lines[1][pos] = chunk_is_free ? 'x' : 'X';
141 } else if (chunk->word_size() == small_chunk_size) {
142 lines[1][pos] = chunk_is_free ? 's' : 'S';
143 } else if (chunk->word_size() == med_chunk_size) {
144 lines[1][pos] = chunk_is_free ? 'm' : 'M';
145 } else if (chunk->word_size() > med_chunk_size) {
146 lines[1][pos] = chunk_is_free ? 'h' : 'H';
147 } else {
148 ShouldNotReachHere();
149 }
150
151 // Line 3: chunk origin
152 const ChunkOrigin origin = chunk->get_origin();
153 lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
154
155 // Line 4: Virgin chunk? Virgin chunks are chunks that were created as a byproduct of padding or splitting
156 // but have never been used.
157 lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
158
159 p += spec_chunk_size;
160 pos ++;
161 }
162 if (pos > 0) {
163 for (int i = 0; i < NUM_LINES; i ++) {
164 st->fill_to(22);
165 st->print_raw(lines[i], line_len);
166 st->cr();
167 }
168 }
169 for (int i = 0; i < NUM_LINES; i ++) {
170 os::free(lines[i]);
171 }
172 }
173
174
175 #ifdef ASSERT
176
177 // Verify counters, all chunks in this list node and the occupancy map.
178 void VirtualSpaceNode::verify(bool slow) {
179 log_trace(gc, metaspace, freelist)("verifying %s virtual space node (%s).",
180 (is_class() ? "class space" : "metaspace"), (slow ? "slow" : "quick"));
181 // Fast mode: just verify chunk counters and basic geometry
182 // Slow mode: verify chunks and occupancy map
183 uintx num_in_use_chunks = 0;
184 Metachunk* chunk = first_chunk();
185 Metachunk* invalid_chunk = (Metachunk*) top();
186
187 // Iterate the chunks in this node and verify each chunk.
188 while (chunk < invalid_chunk ) {
189 if (slow) {
190 do_verify_chunk(chunk);
191 }
192 if (!chunk->is_tagged_free()) {
193 num_in_use_chunks ++;
194 }
195 const size_t s = chunk->word_size();
196 // Prevent endless loop on invalid chunk size.
197 assert(is_valid_chunksize(is_class(), s), "Invalid chunk size: " SIZE_FORMAT ".", s);
198 MetaWord* next = ((MetaWord*)chunk) + s;
199 chunk = (Metachunk*) next;
200 }
201 assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
202 ", counter: " UINTX_FORMAT ").", num_in_use_chunks, _container_count);
203 // Also verify the occupancy map.
204 if (slow) {
205 occupancy_map()->verify(bottom(), top());
206 }
207 }
208
209 // Verify that all free chunks in this node are ideally merged
210 // (there should not be multiple small chunks where a larger chunk could exist).
211 void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
212 Metachunk* chunk = first_chunk();
213 Metachunk* invalid_chunk = (Metachunk*) top();
214 // Shorthands.
215 const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
216 const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
217 int num_free_chunks_since_last_med_boundary = -1;
218 int num_free_chunks_since_last_small_boundary = -1;
219 bool error = false;
220 char err[256];
221 while (!error && chunk < invalid_chunk ) {
222 // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
223 // Reset the counter when encountering a non-free chunk.
224 if (chunk->get_chunk_type() != HumongousIndex) {
225 if (chunk->is_tagged_free()) {
226 // Count successive free, non-humongous chunks.
227 if (is_aligned(chunk, size_small)) {
228 if (num_free_chunks_since_last_small_boundary > 0) {
229 error = true;
230 jio_snprintf(err, sizeof(err), "Missed chunk merge opportunity to merge a small chunk preceding " PTR_FORMAT ".", p2i(chunk));
231 } else {
232 num_free_chunks_since_last_small_boundary = 0;
233 }
234 } else if (num_free_chunks_since_last_small_boundary != -1) {
235 num_free_chunks_since_last_small_boundary ++;
236 }
237 if (is_aligned(chunk, size_med)) {
238 if (num_free_chunks_since_last_med_boundary > 0) {
239 error = true;
240 jio_snprintf(err, sizeof(err), "Missed chunk merge opportunity to merge a medium chunk preceding " PTR_FORMAT ".", p2i(chunk));
241 } else {
242 num_free_chunks_since_last_med_boundary = 0;
243 }
244 } else if (num_free_chunks_since_last_med_boundary != -1) {
245 num_free_chunks_since_last_med_boundary ++;
246 }
247 } else {
248 // Encountering a non-free chunk, reset counters.
249 num_free_chunks_since_last_med_boundary = -1;
250 num_free_chunks_since_last_small_boundary = -1;
251 }
252 } else {
253 // One cannot merge areas with a humongous chunk in the middle. Reset counters.
254 num_free_chunks_since_last_med_boundary = -1;
255 num_free_chunks_since_last_small_boundary = -1;
256 }
257
258 if (error) {
259 print_map(tty, is_class());
260 fatal("%s", err);
261 }
262
263 MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
264 chunk = (Metachunk*) next;
265 }
266 }
267 #endif // ASSERT
268
269 void VirtualSpaceNode::inc_container_count() {
270 assert_lock_strong(MetaspaceExpand_lock);
271 _container_count++;
272 }
273
274 void VirtualSpaceNode::dec_container_count() {
275 assert_lock_strong(MetaspaceExpand_lock);
276 _container_count--;
277 }
278
279 VirtualSpaceNode::~VirtualSpaceNode() {
280 _rs.release();
281 if (_occupancy_map != NULL) {
282 delete _occupancy_map;
283 }
284 #ifdef ASSERT
285 size_t word_size = sizeof(*this) / BytesPerWord;
286 Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
287 #endif
288 }
289
290 size_t VirtualSpaceNode::used_words_in_vs() const {
291 return pointer_delta(top(), bottom(), sizeof(MetaWord));
292 }
293
294 // Space committed in the VirtualSpace
295 size_t VirtualSpaceNode::capacity_words_in_vs() const {
296 return pointer_delta(end(), bottom(), sizeof(MetaWord));
297 }
298
299 size_t VirtualSpaceNode::free_words_in_vs() const {
300 return pointer_delta(end(), top(), sizeof(MetaWord));
301 }
302
303 // Given an address larger than top(), allocate padding chunks until top is at the given address.
304 void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
305
306 assert(target_top > top(), "Sanity");
307
308 // Padding chunks are added to the freelist.
309 ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(is_class());
310
311 // shorthands
312 const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
313 const size_t small_word_size = chunk_manager->small_chunk_word_size();
314 const size_t med_word_size = chunk_manager->medium_chunk_word_size();
315
316 while (top() < target_top) {
317
318 // We could make this code more generic, but right now we only deal with two possible chunk sizes
319 // for padding chunks, so it is not worth it.
320 size_t padding_chunk_word_size = small_word_size;
321 if (is_aligned(top(), small_word_size * sizeof(MetaWord)) == false) {
322 assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
323 padding_chunk_word_size = spec_word_size;
324 }
325 MetaWord* here = top();
326 assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
327 inc_top(padding_chunk_word_size);
328
329 // Create new padding chunk.
330 ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
331 assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
332
333 Metachunk* const padding_chunk =
334 ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
335 assert(padding_chunk == (Metachunk*)here, "Sanity");
336 DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
337 log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
338 PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
339 (is_class() ? "class space " : "metaspace"),
340 p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
341
342 // Mark chunk start in occupancy map.
343 occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
344
345 // Chunks are born as in-use (see Metachunk ctor). So, before returning
346 // the padding chunk to its chunk manager, mark it as in use (ChunkManager
347 // will assert that).
348 do_update_in_use_info_for_chunk(padding_chunk, true);
349
350 // Return Chunk to freelist.
351 inc_container_count();
352 chunk_manager->return_single_chunk(padding_chunk);
353 // Please note: at this point, ChunkManager::return_single_chunk()
354 // may already have merged the padding chunk with neighboring chunks, so
355 // it may have vanished at this point. Do not reference the padding
356 // chunk beyond this point.
357 }
358
359 assert(top() == target_top, "Sanity");
360
361 } // allocate_padding_chunks_until_top_is_at()
362
363 // Allocates the chunk from the virtual space only.
364 // This interface is also used internally for debugging. Not all
365 // chunks removed here are necessarily used for allocation.
366 Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
367 // Non-humongous chunks are to be allocated aligned to their chunk
368 // size. So, start addresses of medium chunks are aligned to medium
369 // chunk size, those of small chunks to small chunk size and so
370 // forth. This facilitates merging of free chunks and reduces
371 // fragmentation. Chunk sizes are spec < small < medium, with each
372 // larger chunk size being a multiple of the next smaller chunk
373 // size.
374 // Because of this alignment, we may need to create a number of padding
375 // chunks. These chunks are created and added to the freelist.
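// Illustrative example (made-up sizes, not the real constants): assume spec = 128 words,
// small = 512 words, medium = 8192 words. If top() currently sits at an offset of 640 words
// (spec-aligned but not small-aligned) and a medium chunk is requested, we would first add
// spec-sized padding chunks until top is small-aligned, then small-sized padding chunks until
// it is medium-aligned, and only then carve out the requested medium chunk at the aligned address.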
376
377 // The chunk manager to which we will give our padding chunks.
378 ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(is_class());
379
380 // shorthands
381 const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
382 const size_t small_word_size = chunk_manager->small_chunk_word_size();
383 const size_t med_word_size = chunk_manager->medium_chunk_word_size();
384
385 assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
386 chunk_word_size >= med_word_size, "Invalid chunk size requested.");
387
388 // Chunk alignment (in bytes) == chunk size unless humongous.
389 // Humongous chunks are aligned to the smallest chunk size (spec).
390 const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
391 spec_word_size : chunk_word_size) * sizeof(MetaWord);
392
393 // Do we have enough space to create the requested chunk plus
394 // any padding chunks needed?
395 MetaWord* const next_aligned =
396 static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
397 if (!is_available((next_aligned - top()) + chunk_word_size)) {
398 return NULL;
399 }
400
401 // Before allocating the requested chunk, allocate padding chunks if necessary.
402 // We only need to do this for small or medium chunks: specialized chunks are the
403 // smallest size, hence always aligned. Humongous chunks are not aligned to their own size,
404 // only (implicitly) to the smallest chunk size.
405 if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top()) {
406 log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
407 (is_class() ? "class space " : "metaspace"),
408 top(), next_aligned);
409 allocate_padding_chunks_until_top_is_at(next_aligned);
410 // Now, top should be aligned correctly.
411 assert_is_aligned(top(), required_chunk_alignment);
412 }
413
414 // Now, top should be aligned correctly.
415 assert_is_aligned(top(), required_chunk_alignment);
416
417 // Bottom of the new chunk
418 MetaWord* chunk_limit = top();
419 assert(chunk_limit != NULL, "Not safe to call this method");
420
421 // The virtual spaces are always expanded by the
422 // commit granularity to enforce the following condition.
423 // Without this the is_available check will not work correctly.
424 assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
425 "The committed memory doesn't match the expanded memory.");
426
427 if (!is_available(chunk_word_size)) {
428 LogTarget(Trace, gc, metaspace, freelist) lt;
429 if (lt.is_enabled()) {
430 LogStream ls(lt);
431 ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
432 // Dump some information about the virtual space that is nearly full
433 print_on(&ls);
434 }
435 return NULL;
436 }
437
438 // Take the space (bump top on the current virtual space).
439 inc_top(chunk_word_size);
440
441 // Initialize the chunk
442 ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
443 Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
444 assert(result == (Metachunk*)chunk_limit, "Sanity");
445 occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
446 do_update_in_use_info_for_chunk(result, true);
447
448 inc_container_count();
449
450 #ifdef ASSERT
451 EVERY_NTH(VerifyMetaspaceInterval)
452 chunk_manager->locked_verify(true);
453 verify(true);
454 END_EVERY_NTH
455 do_verify_chunk(result);
456 #endif
457
458 result->inc_use_count();
459
460 return result;
461 }
462
463
464 // Expand the virtual space (commit more of the reserved space)
465 bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
466 size_t min_bytes = min_words * BytesPerWord;
467 size_t preferred_bytes = preferred_words * BytesPerWord;
468
469 size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
470
471 if (uncommitted < min_bytes) {
472 return false;
473 }
474
475 size_t commit = MIN2(preferred_bytes, uncommitted);
476 bool result = virtual_space()->expand_by(commit, false);
477
478 if (result) {
479 log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " bytes.",
480 (is_class() ? "class" : "non-class"), commit);
481 DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_committed_space_expanded));
482 } else {
483 log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " bytes.",
484 (is_class() ? "class" : "non-class"), commit);
485 }
486
487 assert(result, "Failed to commit memory");
488
489 return result;
490 }
491
492 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
493 assert_lock_strong(MetaspaceExpand_lock);
494 Metachunk* result = take_from_committed(chunk_word_size);
495 return result;
496 }
497
498 bool VirtualSpaceNode::initialize() {
499
500 if (!_rs.is_reserved()) {
501 return false;
502 }
503
504 // These are necessary restrictions to make sure that the virtual space always
505 // grows in steps of Metaspace::commit_alignment(). If both base and size are
506 // aligned, only the middle alignment of the VirtualSpace is used.
507 assert_is_aligned(_rs.base(), Metaspace::commit_alignment());
508 assert_is_aligned(_rs.size(), Metaspace::commit_alignment());
509
510 // ReservedSpaces marked as special will have the entire memory
511 // pre-committed. Setting a committed size will make sure that
512 // committed_size and actual_committed_size agree.
513 size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
514
515 bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
516 Metaspace::commit_alignment());
517 if (result) {
518 assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
519 "Checking that the pre-committed memory was registered by the VirtualSpace");
520
521 set_top((MetaWord*)virtual_space()->low());
522 }
523
524 // Initialize Occupancy Map.
525 const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
526 _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
527
528 return result;
529 }
530
531 void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const {
532 size_t used_words = used_words_in_vs();
533 size_t commit_words = committed_words();
534 size_t res_words = reserved_words();
535 VirtualSpace* vs = virtual_space();
536
537 st->print("node @" PTR_FORMAT ": ", p2i(this));
538 st->print("reserved=");
539 print_scaled_words(st, res_words, scale);
540 st->print(", committed=");
541 print_scaled_words_and_percentage(st, commit_words, res_words, scale);
542 st->print(", used=");
543 print_scaled_words_and_percentage(st, used_words, res_words, scale);
544 st->cr();
545 st->print(" [" PTR_FORMAT ", " PTR_FORMAT ", "
546 PTR_FORMAT ", " PTR_FORMAT ")",
547 p2i(bottom()), p2i(top()), p2i(end()),
548 p2i(vs->high_boundary()));
549 }
550
551 #ifdef ASSERT
552 void VirtualSpaceNode::mangle() {
553 size_t word_size = capacity_words_in_vs();
554 Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
555 }
556 #endif // ASSERT
557
558 void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
559 assert(is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
560 #ifdef ASSERT
561 verify(false);
562 EVERY_NTH(VerifyMetaspaceInterval)
563 verify(true);
564 END_EVERY_NTH
565 #endif
566 for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
567 ChunkIndex index = (ChunkIndex)i;
568 size_t chunk_size = chunk_manager->size_by_index(index);
569
570 while (free_words_in_vs() >= chunk_size) {
571 Metachunk* chunk = get_chunk_vs(chunk_size);
572 // Chunks will be allocated aligned, so the allocation may require
573 // additional padding chunks. That may cause the above allocation to
574 // fail. Just ignore the failed allocation and continue with the
575 // next smaller chunk size. As the VirtualSpaceNode committed
576 // size should be a multiple of the smallest chunk size, we
577 // should always be able to fill the VirtualSpace completely.
578 if (chunk == NULL) {
579 break;
580 }
581 chunk_manager->return_single_chunk(chunk);
582 }
583 }
584 assert(free_words_in_vs() == 0, "should be empty now");
585 }
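// Example of the retiring loop above (illustrative sizes only): with chunk sizes spec = 128,
// small = 512 and medium = 8192 words, a node with 8960 free words left would be retired as one
// medium chunk (8192), one small chunk (512) and two spec chunks (2 x 128), leaving exactly
// zero free words, which is what the assert above expects.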
586
587 } // namespace metaspace
588
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25
26 #include "precompiled.hpp"
27
28 #include "logging/log.hpp"
29
30 #include "memory/metaspace/chunkLevel.hpp"
31 #include "memory/metaspace/chunkTree.hpp"
32 #include "memory/metaspace/commitLimiter.hpp"
33 #include "memory/metaspace/constants.hpp"
34 #include "memory/metaspace/counter.hpp"
35 #include "memory/metaspace/metachunk.hpp"
36 #include "memory/metaspace/metaspaceCommon.hpp"
37 #include "memory/metaspace/runningCounters.hpp"
38 #include "memory/metaspace/virtualSpaceNode.hpp"
39
40 #include "runtime/mutexLocker.hpp"
41 #include "runtime/os.hpp"
42
43 #include "utilities/align.hpp"
44 #include "utilities/debug.hpp"
45 #include "utilities/globalDefinitions.hpp"
46
47 namespace metaspace {
48
49 #ifdef ASSERT
50 template <class T>
51 void check_is_aligned_to_commit_granule(T x) {
52 assert(is_aligned(x, constants::commit_granule_bytes), "Unaligned pointer");
53 }
54 #endif
55
56 // Given an address range, ensure it is committed.
57 //
58 // The range has to be aligned to granule size.
59 //
60 // Function will:
61 // - check how many granules in that region are uncommitted; if all are committed, it
62 // returns true immediately.
63 // - check if committing those uncommitted granules would bring us over the commit limit
64 // (GC threshold, MaxMetaspaceSize); if so, it returns false.
65 // - commit the memory.
66 // - mark the range as committed in the commit mask.
67 //
68 // Returns true on success, false if it hit a commit limit.
69 bool VirtualSpaceNode::commit_range(MetaWord* p, size_t word_size) {
70
71 DEBUG_ONLY(check_is_aligned_to_commit_granule(p);)
72 DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);)
73 assert_lock_strong(MetaspaceExpand_lock);
74
75 // First calculate how large the committed regions in this range are
76 const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
77 DEBUG_ONLY(check_is_aligned_to_commit_granule(committed_words_in_range);)
78
79 // By how many words we would increase the commit charge
80 // were we to commit the given address range completely.
81 const size_t commit_increase_words = word_size - committed_words_in_range;
82
83 if (commit_increase_words == 0) {
84 return true; // Already fully committed, nothing to do.
85 }
86
87 // Before committing any more memory, check limits.
88 if (_commit_limiter->possible_expansion_words() < commit_increase_words) {
89 return false;
90 }
91
92 // Commit...
93 if (os::commit_memory((char*)p, word_size * BytesPerWord, false) == false) {
94 vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to commit metaspace.");
95 }
96
97 log_debug(gc, metaspace)("Increased metaspace by " SIZE_FORMAT " bytes.",
98 commit_increase_words * BytesPerWord);
99
100 // ... tell commit limiter...
101 _commit_limiter->increase_committed(commit_increase_words);
102
103 // ... update counters in containing vslist ...
104 _total_committed_words_counter->increment_by(commit_increase_words);
105
106 // ... and update the commit mask.
107 _commit_mask.mark_range_as_committed(p, word_size);
108
109 #ifdef ASSERT
110 // The commit boundary maintained in the CommitLimiter should equal the sum of committed words
111 // in both the class and non-class vslists (outside gtests).
112 if (_commit_limiter == CommitLimiter::globalLimiter()) {
113 assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
114 }
115 #endif
116
117 return true;
118
119 }
120
121 // Given an address range, ensure it is committed.
122 //
123 // The range does not have to be aligned to granule size. However, the function will always commit
124 // whole granules.
125 //
126 // Function will:
127 // - check how many granules in that region are uncommitted; if all are committed, it
128 // returns true immediately.
129 // - check if committing those uncommitted granules would bring us over the commit limit
130 // (GC threshold, MaxMetaspaceSize); if so, it returns false.
131 // - commit the memory.
132 // - mark the range as committed in the commit mask.
133 //
134 // Returns true on success, false if it hit a commit limit.
135 bool VirtualSpaceNode::ensure_range_is_committed(MetaWord* p, size_t word_size) {
136
137 assert_lock_strong(MetaspaceExpand_lock);
138 assert(p != NULL && word_size > 0, "Sanity");
139
140 MetaWord* p_start = align_down(p, constants::commit_granule_bytes);
141 MetaWord* p_end = align_up(p + word_size, constants::commit_granule_bytes);
142
143 // TODO: simple for now. Make it more intelligent later.
144 return commit_range(p_start, p_end - p_start);
145
146 }
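// Example of the granule rounding above (assuming, for illustration, a 64K commit granule):
// a request to commit a few words starting in the middle of a granule is widened to that single
// enclosing 64K granule; a request spanning a granule boundary is widened to the two granules it
// touches. commit_range() then skips whatever portion of those granules is already committed.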
147
148 // Given an address range (which has to be aligned to commit granule size):
149 // - uncommit it
150 // - mark it as uncommitted in the commit mask
151 bool VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {
152
153 DEBUG_ONLY(check_is_aligned_to_commit_granule(p);)
154 DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);)
155 assert_lock_strong(MetaspaceExpand_lock);
156
157 // First calculate how large the committed regions in this range are
158 const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
159 DEBUG_ONLY(check_is_aligned_to_commit_granule(committed_words_in_range);)
160
161 if (committed_words_in_range == 0) {
162 return true; // Already fully uncommitted, nothing to do.
163 }
164
165 // Uncommit...
166 if (os::uncommit_memory((char*)p, word_size * BytesPerWord) == false) {
167 // Note: this can actually happen, since uncommit may increase the number of mappings.
168 fatal("Failed to uncommit metaspace.");
169 }
170
171 log_debug(gc, metaspace)("Decreased metaspace by " SIZE_FORMAT " bytes.",
172 committed_words_in_range * BytesPerWord);
173
174 // ... tell commit limiter...
175 _commit_limiter->decrease_committed(committed_words_in_range);
176
177 // ... and global counters...
178 _total_committed_words_counter->decrement_by(committed_words_in_range);
179
180 // ... and update the commit mask.
181 _commit_mask.mark_range_as_uncommitted(p, word_size);
182
183 #ifdef ASSERT
184 // The commit boundary maintained in the CommitLimiter should equal the sum of committed words
185 // in both the class and non-class vslists (outside gtests).
186 if (_commit_limiter == CommitLimiter::globalLimiter()) { // We are outside a test scenario
187 assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
188 }
189 #endif
190
191 return true;
192
193 }
194
195 //// creation, destruction ////
196
197 VirtualSpaceNode::VirtualSpaceNode(ReservedSpace rs,
198 CommitLimiter* limiter,
199 SizeCounter* reserve_counter,
200 SizeCounter* commit_counter)
201 : _next(NULL),
202 _base(rs.base()),
203 _word_size(rs.size() / BytesPerWord),
204 _used_words(0),
205 _commit_mask(rs.base(), rs.size() / BytesPerWord),
206 _chunk_tree_array(rs.base(), rs.size() / BytesPerWord),
207 _commit_limiter(limiter),
208 _total_reserved_words_counter(reserve_counter),
209 _total_committed_words_counter(commit_counter)
210 {
211 // Update reserved counter in vslist
212 _total_reserved_words_counter->increment_by(_word_size);
213 }
214
215 // Create a node of a given size
216 VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size,
217 CommitLimiter* limiter,
218 SizeCounter* reserve_counter,
219 SizeCounter* commit_counter)
220 {
221
222 DEBUG_ONLY(check_is_aligned_to_commit_granule(word_size);)
223
224 ReservedSpace rs(word_size * BytesPerWord,
225 constants::commit_granule_bytes,
226 false, // TODO deal with large pages
227 false);
228
229 if (!rs.is_reserved()) {
230 vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
231 }
232
233 // Note: the reserved-words counter is updated by the VirtualSpaceNode constructor.
234
235 return create_node(rs, limiter, reserve_counter, commit_counter);
236
237 }
238
239 // Create a node over an existing space
240 VirtualSpaceNode* VirtualSpaceNode::create_node(ReservedSpace rs,
241 CommitLimiter* limiter,
242 SizeCounter* reserve_counter,
243 SizeCounter* commit_counter)
244 {
245 // Note: the reserved-words counter is updated by the VirtualSpaceNode constructor.
246 return new VirtualSpaceNode(rs, limiter, reserve_counter, commit_counter);
247 }
248
249 VirtualSpaceNode::~VirtualSpaceNode() {
250 _rs.release();
251
252
253 // Update counters in vslist
254 _total_committed_words_counter->decrement_by(committed_words());
255 _total_reserved_words_counter->decrement_by(_word_size);
256
257 }
258
259
260
261 //// Chunk allocation, splitting, merging /////
262
263 // Allocate a root chunk from this node. Will fail and return NULL
264 // if the node is full.
265 // Note: this just returns a chunk whose memory is reserved; no memory is committed yet.
266 // Hence, before using this chunk, it must be committed.
267 // Also, no limits are checked, since no committing takes place.
268 Metachunk* VirtualSpaceNode::allocate_root_chunk() {
269
270 assert_lock_strong(MetaspaceExpand_lock);
271
272 assert_is_aligned(free_words(), chklvl::MAX_CHUNK_WORD_SIZE);
273
274 if (free_words() >= chklvl::MAX_CHUNK_WORD_SIZE) {
275
276 MetaWord* loc = _base + _used_words;
277 _used_words += chklvl::MAX_CHUNK_WORD_SIZE;
278
279 // Create a new chunk tree for that new root node.
280 ChunkTree* tree = _chunk_tree_array.get_tree_by_address(loc);
281
282 // Create a root chunk header and initialize it.
283 Metachunk* c = tree->alloc_root_chunk_header();
284
285 // Wire it to the memory.
286 c->set_base(loc);
287
288 DEBUG_ONLY(c->verify(true);)
289 return c;
290
291 }
292
293 return NULL; // Node is full.
294
295 }
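// Usage sketch (hypothetical caller, holding MetaspaceExpand_lock): the returned chunk is only
// reserved, so a caller would typically follow up with something like
//
//   Metachunk* c = node->allocate_root_chunk();
//   if (c != NULL && !node->ensure_range_is_committed(c->base(), some_word_count)) {
//     ... handle hitting the commit limit ...
//   }
//
// before handing out memory from the chunk. some_word_count is a placeholder, not a real constant.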
296
297 Metachunk* VirtualSpaceNode::split(chklvl_t target_level, Metachunk* c, Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS]) {
298
299 assert_lock_strong(MetaspaceExpand_lock);
300
301 // Get the tree associated with this chunk and let it handle the splitting
302 ChunkTree* tree = _chunk_tree_array.get_tree_by_address(c->base());
303 return tree->split(target_level, c, splinters);
304
305 }
306
307 Metachunk* VirtualSpaceNode::merge(Metachunk* c, int num_merged[chklvl::NUM_CHUNK_LEVELS]) {
308
309 assert_lock_strong(MetaspaceExpand_lock);
310
311 // Get the tree associated with this chunk and let it handle the merging
312 ChunkTree* tree = _chunk_tree_array.get_tree_by_address(c->base());
313 return tree->merge(c, num_merged);
314
315 }
316
317 #ifdef ASSERT
318 // Verify counters and basic structure. Slow mode: verify all chunks in depth
319 void VirtualSpaceNode::verify(bool slow) const {
320
321 assert_lock_strong(MetaspaceExpand_lock);
322
323 assert(base() != NULL, "Invalid base");
324 assert_is_aligned(base(), chklvl::MAX_CHUNK_BYTE_SIZE);
325 assert(used_words() <= word_size(), "Sanity");
326
327 // Since we only ever hand out root chunks from a vsnode, top should always be aligned
328 // to root chunk size.
329 assert_is_aligned(used_words(), chklvl::MAX_CHUNK_WORD_SIZE);
330
331 _commit_mask.verify(slow);
332 _chunk_tree_array.verify(slow);
333
334 }
335 #endif
336
337 // Returns sum of committed space, in words.
338 size_t VirtualSpaceNode::committed_words() const {
339 return _commit_mask.get_committed_size();
340 }
341
342
343 } // namespace metaspace
344