/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"

size_t CodeHeap::header_size() {
  return sizeof(HeapBlock);
}


// Implementation of Heap

CodeHeap::CodeHeap(const char* name, const int code_blob_type)
  : _code_blob_type(code_blob_type) {
  _name                         = name;
  _number_of_committed_segments = 0;
  _number_of_reserved_segments  = 0;
  _segment_size                 = 0;
  _log2_segment_size            = 0;
  _next_segment                 = 0;
  _freelist                     = NULL;
  _freelist_segments            = 0;
  _freelist_length              = 0;
  _max_allocated_capacity       = 0;
  _blob_count                   = 0;
  _nmethod_count                = 0;
  _adapter_count                = 0;
  _full_count                   = 0;
}


// The segmap is marked free for that part of the heap
// which has not been allocated yet (beyond _next_segment).
// "Allocated" space in this context means there exists a
// HeapBlock or a FreeBlock describing this space.
void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  assert(beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  memset(p, free_sentinel, q-p);
}
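
// For example (hypothetical indices): after a block of 5 segments has been
// carved out starting at segment 3, mark_segmap_as_used(3, 8) writes the
// pattern {0, 1, 2, 3, 4} into _segmap[3..7]. Starting from any index in that
// range, subtracting the stored value from the index (repeatedly, since the
// values wrap around for very long blocks) lands on segment 3, where the
// block header lives.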

// Don't get confused here.
// All existing blocks, no matter if they are used() or free(),
// have their segmap marked as used. This allows finding the
// block header (HeapBlock or FreeBlock) for any pointer
// within the allocated range (upper limit: _next_segment).
void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
  assert(beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  int i = 0;
  while (p < q) {
    *p++ = i++;
    if (i == free_sentinel) i = 1;
  }
}


static size_t align_to_page_size(size_t size) {
  const size_t alignment = (size_t)os::vm_page_size();
  assert(is_power_of_2(alignment), "no kidding ???");
  return (size + alignment - 1) & ~(alignment - 1);
}


void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}


bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
  assert(rs.size() >= committed_size, "reserved < committed");
  assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  assert(is_power_of_2(segment_size), "segment_size must be a power of 2");

  _segment_size      = segment_size;
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory.
  size_t page_size = os::vm_page_size();
  if (os::can_execute_large_page_memory()) {
    const size_t min_pages = 8;
    page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
                     os::page_size_for_region_aligned(rs.size(), min_pages));
  }

  const size_t granularity = os::vm_allocation_granularity();
  const size_t c_size = align_up(committed_size, page_size);

  os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
    return false;
  }

  on_code_mapping(_memory.low(), _memory.committed_size());
  _number_of_committed_segments = size_to_segments(_memory.committed_size());
  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
  const size_t reserved_segments_size = align_up(_number_of_reserved_segments, reserved_segments_alignment);
  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

  // reserve space for _segmap
  if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
    return false;
  }

  MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);

  assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map");
  assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments,  "could not reserve enough space for segment map");
  assert(_segmap.reserved_size()  >= _segmap.committed_size(),               "just checking");

  // initialize remaining instance variables
  clear();
  return true;
}


bool CodeHeap::expand_by(size_t size) {
  // expand _memory space
  size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
  if (dm > 0) {
    // Use at least the available uncommitted space if 'size' is larger
    if (_memory.uncommitted_size() != 0 && dm > _memory.uncommitted_size()) {
      dm = _memory.uncommitted_size();
    }
    char* base = _memory.low() + _memory.committed_size();
    if (!_memory.expand_by(dm)) return false;
    on_code_mapping(base, dm);
    size_t i = _number_of_committed_segments;
    _number_of_committed_segments = size_to_segments(_memory.committed_size());
    assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
    // expand _segmap space
    size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
    if ((ds > 0) && !_segmap.expand_by(ds)) {
      return false;
    }
    assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
    // initialize additional segmap entries
    mark_segmap_as_free(i, _number_of_committed_segments);
  }
  return true;
}

void CodeHeap::clear() {
  _next_segment = 0;
  mark_segmap_as_free(0, _number_of_committed_segments);
}


void* CodeHeap::allocate(size_t instance_size) {
  size_t number_of_segments = size_to_segments(instance_size + header_size());
  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");

  // First check if we can satisfy request from freelist
  NOT_PRODUCT(verify());
  HeapBlock* block = search_freelist(number_of_segments);
  NOT_PRODUCT(verify());

  if (block != NULL) {
    assert(!block->free(), "must not be marked free");
    guarantee((char*) block >= _memory.low_boundary() && (char*) block < _memory.high(),
              "The newly allocated block " INTPTR_FORMAT " is not within the heap "
              "starting with " INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
              p2i(block), p2i(_memory.low_boundary()), p2i(_memory.high()));
    DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, instance_size));
    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
    _blob_count++;
    return block->allocated_space();
  }

  // Ensure minimum size for allocation to the heap.
  number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);

  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
    HeapBlock* b = block_at(_next_segment);
    b->initialize(number_of_segments);
    _next_segment += number_of_segments;
    guarantee((char*) b >= _memory.low_boundary() && (char*) b < _memory.high(),
              "The newly allocated block " INTPTR_FORMAT " is not within the heap "
              "starting with " INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
              p2i(b), p2i(_memory.low_boundary()), p2i(_memory.high()));
    DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size));
    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
    _blob_count++;
    return b->allocated_space();
  } else {
    return NULL;
  }
}
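
// Worked example for the allocation arithmetic above (the numbers are
// assumptions, not values taken from this file): with a segment size of
// 128 bytes and a 32-byte HeapBlock header, allocate(1000) needs
// size_to_segments(1000 + 32) = 9 segments. If the freelist offers no fit,
// the block is carved out at _next_segment and mark_segmap_as_used() writes
// 0, 1, ..., 8 into the corresponding segmap entries.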

// Split the given block into two at the given segment.
// This is helpful when a block was allocated too large
// to trim off the unused space at the end (interpreter).
// It also helps with splitting a large free block during allocation.
// Usage state (used or free) must be set by caller since
// we don't know if the resulting blocks will be used or free.
// split_at is the segment number (relative to segment_for(b))
// where the split happens. The segment with relative
// number split_at is the first segment of the split-off block.
HeapBlock* CodeHeap::split_block(HeapBlock *b, size_t split_at) {
  if (b == NULL) return NULL;
  // After the split, both blocks must have a size of at least CodeCacheMinBlockLength
  assert((split_at >= CodeCacheMinBlockLength) && (split_at + CodeCacheMinBlockLength <= b->length()),
         "split position(%d) out of range [0..%d]", (int)split_at, (int)b->length());
  size_t split_segment = segment_for(b) + split_at;
  size_t b_size        = b->length();
  size_t newb_size     = b_size - split_at;

  HeapBlock* newb = block_at(split_segment);
  newb->set_length(newb_size);
  mark_segmap_as_used(segment_for(newb), segment_for(newb) + newb_size);
  b->set_length(split_at);
  return newb;
}

void CodeHeap::deallocate_tail(void* p, size_t used_size) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");

  size_t actual_number_of_segments = b->length();
  size_t used_number_of_segments   = size_to_segments(used_size + header_size());
  size_t unused_number_of_segments = actual_number_of_segments - used_number_of_segments;
  guarantee(used_number_of_segments <= actual_number_of_segments, "Must be!");

  HeapBlock* f = split_block(b, used_number_of_segments);
  DEBUG_ONLY(memset((void *)f->allocated_space(), badCodeHeapFreeVal,
             segments_to_size(unused_number_of_segments) - sizeof(HeapBlock)));
  add_to_freelist(f);
  NOT_PRODUCT(verify());
}
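
// For example (hypothetical sizes): a client that was handed a block large
// enough for 1024 bytes but ended up using only 400 of them can call
// deallocate_tail(p, 400). Provided both resulting pieces stay at least
// CodeCacheMinBlockLength segments long, the block is split and the unused
// tail segments are returned to the freelist.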

void CodeHeap::deallocate(void* p) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
  guarantee((char*) b >= _memory.low_boundary() && (char*) b < _memory.high(),
            "The block to be deallocated " INTPTR_FORMAT " is not within the heap "
            "starting with " INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
            p2i(b), p2i(_memory.low_boundary()), p2i(_memory.high()));
  DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapFreeVal,
             segments_to_size(b->length()) - sizeof(HeapBlock)));
  add_to_freelist(b);
  NOT_PRODUCT(verify());
}

/**
 * Uses segment map to find the start (header) of an nmethod. This works as follows:
 * The memory of the code cache is divided into 'segments'. The size of a segment is
 * determined by -XX:CodeCacheSegmentSize=XX. Allocation in the code cache can only
 * happen at segment boundaries. A pointer in the code cache can be mapped to a segment
 * by calling segment_for(addr). Each time memory is requested from the code cache,
 * the segmap is updated accordingly. See the following example, which illustrates the
 * state of code cache and the segment map: (seg -> segment, nm -> nmethod)
 *
 *          code cache          segmap
 *         -----------        ---------
 * seg 1   | nm 1    |   ->   | 0     |
 * seg 2   | nm 1    |   ->   | 1     |
 * ...     | nm 1    |   ->   | ..    |
 * seg m   | nm 2    |   ->   | 0     |
 * seg m+1 | nm 2    |   ->   | 1     |
 * ...     | nm 2    |   ->   | 2     |
 * ...     | nm 2    |   ->   | ..    |
 * ...     | nm 2    |   ->   | 0xFE  |
 * seg m+n | nm 2    |   ->   | 1     |
 * ...     | nm 2    |   ->   |       |
 *
 * A value of '0' in the segmap indicates that this segment contains the beginning of
 * an nmethod. Let's walk through a simple example: If we want to find the start of
 * an nmethod that falls into seg 2, we read the value of segmap[2]. The value
 * is an offset that points to the segment that contains the start of the nmethod.
 * Another example: If we want to get the start of nm 2, and we happen to get a pointer
 * that points to seg m+n, we first read seg[m+n], which returns '1'. So we have to
 * do one more read of segmap[m+n-1] to finally get the segment header.
 */
void* CodeHeap::find_start(void* p) const {
  if (!contains(p)) {
    return NULL;
  }
  size_t seg_idx = segment_for(p);
  address seg_map = (address)_segmap.low();
  if (is_segment_unused(seg_map[seg_idx])) {
    return NULL;
  }
  while (seg_map[seg_idx] > 0) {
    seg_idx -= (int)seg_map[seg_idx];
  }

  HeapBlock* h = block_at(seg_idx);
  if (h->free()) {
    return NULL;
  }
  return h->allocated_space();
}

CodeBlob* CodeHeap::find_blob_unsafe(void* start) const {
  CodeBlob* result = (CodeBlob*)CodeHeap::find_start(start);
  if (result != NULL && result->blob_contains((address)start)) {
    return result;
  }
  return NULL;
}

size_t CodeHeap::alignment_unit() const {
  // this will be a power of two
  return _segment_size;
}


size_t CodeHeap::alignment_offset() const {
  // The lowest address in any allocated block will be
  // equal to alignment_offset (mod alignment_unit).
  return sizeof(HeapBlock) & (_segment_size - 1);
}

// Returns the current block if available and used.
// If not, it returns the subsequent block (if available), NULL otherwise.
// Free blocks are merged, therefore there is at most one free block
// between two used ones. As a result, the subsequent block (if available) is
// guaranteed to be used.
void* CodeHeap::next_used(HeapBlock* b) const {
  if (b != NULL && b->free()) b = next_block(b);
  assert(b == NULL || !b->free(), "must be in use or at end of heap");
  return (b == NULL) ? NULL : b->allocated_space();
}

// Returns the first used HeapBlock
HeapBlock* CodeHeap::first_block() const {
  if (_next_segment > 0)
    return block_at(0);
  return NULL;
}

HeapBlock* CodeHeap::block_start(void* q) const {
  HeapBlock* b = (HeapBlock*)find_start(q);
  if (b == NULL) return NULL;
  return b - 1;
}

// Returns the next Heap block, or NULL if there is none
HeapBlock* CodeHeap::next_block(HeapBlock *b) const {
  if (b == NULL) return NULL;
  size_t i = segment_for(b) + b->length();
  if (i < _next_segment)
    return block_at(i);
  return NULL;
}


// Returns current capacity
size_t CodeHeap::capacity() const {
  return _memory.committed_size();
}

size_t CodeHeap::max_capacity() const {
  return _memory.reserved_size();
}

int CodeHeap::allocated_segments() const {
  return (int)_next_segment;
}

size_t CodeHeap::allocated_capacity() const {
  // size of used heap - size on freelist
  return segments_to_size(_next_segment - _freelist_segments);
}

// Returns size of the unallocated heap space
size_t CodeHeap::heap_unallocated_capacity() const {
  // Total number of segments - number currently used
  return segments_to_size(_number_of_reserved_segments - _next_segment);
}

// Free list management

FreeBlock* CodeHeap::following_block(FreeBlock *b) {
  return (FreeBlock*)(((address)b) + _segment_size * b->length());
}

// Inserts block b after a
void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
  assert(a != NULL && b != NULL, "must be real pointers");

  // Link b into the list after a
  b->set_link(a->link());
  a->set_link(b);

  // See if we can merge blocks
  merge_right(b); // Try to make b bigger
  merge_right(a); // Try to make a include b
}

// Try to merge this block with the following block
bool CodeHeap::merge_right(FreeBlock* a) {
  assert(a->free(), "must be a free block");
  if (following_block(a) == a->link()) {
    assert(a->link() != NULL && a->link()->free(), "must be free too");
    // Update block a to include the following block
    a->set_length(a->length() + a->link()->length());
    a->set_link(a->link()->link());
    // Update find_start map
    size_t beg = segment_for(a);
    mark_segmap_as_used(beg, beg + a->length());
    _freelist_length--;
    return true;
  }
  return false;
}
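
// For example (hypothetical layout): if a free block covering segments
// [10..14] is already on the freelist and the block covering [15..19] is
// linked in right after it, following_block() of the first block equals its
// link, so merge_right() collapses the two into a single free block of
// length 10 and rewrites the segmap entries for segments [10..19].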

void CodeHeap::add_to_freelist(HeapBlock* a) {
  FreeBlock* b = (FreeBlock*)a;
  _freelist_length++;

  assert(b != _freelist, "cannot be removed twice");


  // Mark as free and update free space count
  _freelist_segments += b->length();
  b->set_free();

  // First element in list?
  if (_freelist == NULL) {
    b->set_link(NULL);
    _freelist = b;
    return;
  }

  // Since the freelist is ordered (smaller addresses -> larger addresses) and the
  // element we want to insert into the freelist has a smaller address than the first
  // element, we can simply add 'b' as the first element and we are done.
  if (b < _freelist) {
    // Insert first in list
    b->set_link(_freelist);
    _freelist = b;
    merge_right(_freelist);
    return;
  }

  // Scan for right place to put into list. List
  // is sorted by increasing addresses
  FreeBlock* prev = _freelist;
  FreeBlock* cur  = _freelist->link();
  while(cur != NULL && cur < b) {
    assert(prev < cur, "Freelist must be ordered");
    prev = cur;
    cur  = cur->link();
  }
  assert((prev < b) && (cur == NULL || b < cur), "free-list must be ordered");
  insert_after(prev, b);
}

/**
 * Search freelist for an entry on the list with the best fit.
 * @return NULL, if no entry was found
 */
HeapBlock* CodeHeap::search_freelist(size_t length) {
  FreeBlock* found_block  = NULL;
  FreeBlock* found_prev   = NULL;
  size_t     found_length = _next_segment; // max it out to begin with

  HeapBlock* res  = NULL;
  FreeBlock* prev = NULL;
  FreeBlock* cur  = _freelist;

  length = length < CodeCacheMinBlockLength ? CodeCacheMinBlockLength : length;

  // Search for best-fitting block
  while(cur != NULL) {
    size_t cur_length = cur->length();
    if (cur_length == length) {
      // We have a perfect fit
      found_block  = cur;
      found_prev   = prev;
      found_length = cur_length;
      break;
    } else if ((cur_length > length) && (cur_length < found_length)) {
      // This is a new, closer fit. Remember block, its previous element, and its length
      found_block  = cur;
      found_prev   = prev;
      found_length = cur_length;
    }
    // Next element in list
    prev = cur;
    cur  = cur->link();
  }

  if (found_block == NULL) {
    // None found
    return NULL;
  }

  // Exact (or at least good enough) fit. Remove from list.
  // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
  if (found_length - length < CodeCacheMinBlockLength) {
    _freelist_length--;
    length = found_length;
    if (found_prev == NULL) {
      assert(_freelist == found_block, "sanity check");
      _freelist = _freelist->link();
    } else {
      assert((found_prev->link() == found_block), "sanity check");
      // Unlink element
      found_prev->set_link(found_block->link());
    }
    res = found_block;
  } else {
    // Truncate the free block and return the truncated part
    // as new HeapBlock. The remaining free block does not
    // need to be updated, except for its length. Truncating
    // the segment map does not invalidate the leading part.
    res = split_block(found_block, found_length - length);
  }

  res->set_used();
  _freelist_segments -= length;
  return res;
}
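
// For example (hypothetical freelist): with free blocks of 4, 7, and 12
// segments and a request for 6 segments, the 7-segment block is the best fit.
// If CodeCacheMinBlockLength were 4, the 1-segment remainder would be too
// small to keep, so the whole 7-segment block is handed out; with a larger
// remainder, split_block() would carve off just the requested tail and leave
// the leading part on the freelist.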

//----------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

void CodeHeap::print() {
  tty->print_cr("The Heap");
}

void CodeHeap::verify() {
  if (VerifyCodeCache) {
    size_t len = 0;
    int count = 0;
    for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
      len += b->length();
      count++;
      // Check if we have merged all free blocks
      assert(merge_right(b) == false, "Missed merging opportunity");
    }
    // Verify that freelist contains the right amount of free space
    assert(len == _freelist_segments, "wrong freelist");

    for(HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
      if (h->free()) count--;
    }
    // Verify that the freelist contains the same number of blocks
    // as free blocks found on the full list.
    assert(count == 0, "missing free blocks");

    // Verify segment map marking.
    // All allocated segments, no matter if in a free or used block,
    // must be marked "in use".
    address seg_map = (address)_segmap.low();
    size_t  nseg    = 0;
    for(HeapBlock* b = first_block(); b != NULL; b = next_block(b)) {
      size_t seg1 = segment_for(b);
      size_t segn = seg1 + b->length();
      for (size_t i = seg1; i < segn; i++) {
        nseg++;
        if (is_segment_unused(seg_map[i])) {
          warning("CodeHeap: unused segment. %d [%d..%d], %s block", (int)i, (int)seg1, (int)segn, b->free() ? "free" : "used");
        }
      }
    }
    if (nseg != _next_segment) {
      warning("CodeHeap: segment count mismatch. found %d, expected %d.", (int)nseg, (int)_next_segment);
    }

    // Verify that the number of free blocks is not out of hand.
    static int free_block_threshold = 10000;
    if (count > free_block_threshold) {
      warning("CodeHeap: # of free blocks > %d", free_block_threshold);
      // Double the warning limit
      free_block_threshold *= 2;
    }
  }
}

#endif