/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"

size_t CodeHeap::header_size() {
  return sizeof(HeapBlock);
}


// Implementation of Heap

CodeHeap::CodeHeap(const char* name, const int code_blob_type)
  : _code_blob_type(code_blob_type) {
  _name                         = name;
  _number_of_committed_segments = 0;
  _number_of_reserved_segments  = 0;
  _segment_size                 = 0;
  _log2_segment_size            = 0;
  _next_segment                 = 0;
  _freelist                     = NULL;
  _freelist_segments            = 0;
  _freelist_length              = 0;
  _max_allocated_capacity       = 0;
  _blob_count                   = 0;
  _nmethod_count                = 0;
  _adapter_count                = 0;
  _full_count                   = 0;
}


void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  assert(beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  while (p < q) *p++ = free_sentinel;
}


void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
  assert(beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  int i = 0;
  while (p < q) {
    *p++ = i++;
    if (i == free_sentinel) i = 1;
  }
}
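
// Sketch of what mark_segmap_as_used() writes for a block starting at segment
// 'beg' and spanning more than free_sentinel segments (free_sentinel is 0xFF
// here; compare the diagram above find_start() below):
//
//   segmap[beg+0]   = 0      // the segment holding the block header
//   segmap[beg+1]   = 1      // one segment back to the header
//   ...
//   segmap[beg+254] = 0xFE
//   segmap[beg+255] = 1      // the counter wraps; the sentinel is never written
//
// find_start() inverts this by stepping back segmap[i] segments at a time
// until it reads a 0, possibly in several hops for very long blocks.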

static size_t align_to_page_size(size_t size) {
  const size_t alignment = (size_t)os::vm_page_size();
  assert(is_power_of_2(alignment), "no kidding ???");
  return (size + alignment - 1) & ~(alignment - 1);
}


void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}


bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
  assert(rs.size() >= committed_size, "reserved < committed");
  assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  assert(is_power_of_2(segment_size), "segment_size must be a power of 2");

  _segment_size      = segment_size;
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory.
  size_t page_size = os::vm_page_size();
  if (os::can_execute_large_page_memory()) {
    const size_t min_pages = 8;
    page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
                     os::page_size_for_region_aligned(rs.size(), min_pages));
  }

  const size_t granularity = os::vm_allocation_granularity();
  const size_t c_size = align_up(committed_size, page_size);

  os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
    return false;
  }

  on_code_mapping(_memory.low(), _memory.committed_size());
  _number_of_committed_segments = size_to_segments(_memory.committed_size());
  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
  const size_t reserved_segments_size = align_up(_number_of_reserved_segments, reserved_segments_alignment);
  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

  // reserve space for _segmap
  if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
    return false;
  }

  MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);

  assert(_segmap.committed_size() >= (size_t)_number_of_committed_segments, "could not commit enough space for segment map");
  assert(_segmap.reserved_size()  >= (size_t)_number_of_reserved_segments,  "could not reserve enough space for segment map");
  assert(_segmap.reserved_size()  >= _segmap.committed_size(),              "just checking");

  // initialize remaining instance variables
  clear();
  return true;
}


bool CodeHeap::expand_by(size_t size) {
  // expand _memory space
  size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
  if (dm > 0) {
    // If 'size' would take us past the reserved limit, cap the expansion
    // at the remaining uncommitted space.
    if (_memory.uncommitted_size() != 0 && dm > _memory.uncommitted_size()) {
      dm = _memory.uncommitted_size();
    }
    char* base = _memory.low() + _memory.committed_size();
    if (!_memory.expand_by(dm)) return false;
    on_code_mapping(base, dm);
    size_t i = _number_of_committed_segments;
    _number_of_committed_segments = size_to_segments(_memory.committed_size());
    assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
    // expand _segmap space
    size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
    if ((ds > 0) && !_segmap.expand_by(ds)) {
      return false;
    }
    assert(_segmap.committed_size() >= (size_t)_number_of_committed_segments, "just checking");
    // initialize additional segmap entries
    mark_segmap_as_free(i, _number_of_committed_segments);
  }
  return true;
}

void CodeHeap::clear() {
  _next_segment = 0;
  mark_segmap_as_free(0, _number_of_committed_segments);
}


void* CodeHeap::allocate(size_t instance_size) {
  size_t number_of_segments = size_to_segments(instance_size + header_size());
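  // Illustrative sizing (hypothetical numbers, not guaranteed defaults): with
  // 64-byte segments and a 16-byte HeapBlock header, a 100-byte request needs
  // size_to_segments(100 + 16) = ceil(116 / 64) = 2 segments.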
  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");

  // First check if we can satisfy request from freelist
  NOT_PRODUCT(verify());
  HeapBlock* block = search_freelist(number_of_segments);
  NOT_PRODUCT(verify());

  if (block != NULL) {
    assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
    assert(!block->free(), "must not be marked free");
    guarantee((char*) block >= _memory.low_boundary() && (char*) block < _memory.high(),
              "The newly allocated block " INTPTR_FORMAT " is not within the heap "
              "starting with " INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
              p2i(block), p2i(_memory.low_boundary()), p2i(_memory.high()));
    DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, instance_size));
    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
    _blob_count++;
    return block->allocated_space();
  }

  // Ensure minimum size for allocation to the heap.
  number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);

  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
    HeapBlock* b = block_at(_next_segment);
    b->initialize(number_of_segments);
    _next_segment += number_of_segments;
    guarantee((char*) b >= _memory.low_boundary() && (char*) b < _memory.high(),
              "The newly allocated block " INTPTR_FORMAT " is not within the heap "
              "starting with " INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
              p2i(b), p2i(_memory.low_boundary()), p2i(_memory.high()));
    DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size));
    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
    _blob_count++;
    return b->allocated_space();
  } else {
    return NULL;
  }
}
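
// Minimal usage sketch (hypothetical caller; the real ones live in CodeCache):
//
//   void* p = heap->allocate(size);   // NULL if neither the freelist nor the
//   if (p != NULL) {                  // unallocated tail can satisfy 'size'
//     ... emit code into p ...
//     heap->deallocate(p);            // p must be a value previously returned
//   }                                 // by allocate(), see the assert below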

void CodeHeap::deallocate(void* p) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
  guarantee((char*) b >= _memory.low_boundary() && (char*) b < _memory.high(),
            "The block to be deallocated " INTPTR_FORMAT " is not within the heap "
            "starting with " INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
            p2i(b), p2i(_memory.low_boundary()), p2i(_memory.high()));
  DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapFreeVal,
             segments_to_size(b->length()) - sizeof(HeapBlock)));
  add_to_freelist(b);
  NOT_PRODUCT(verify());
}

/**
 * Uses the segment map to find the start (header) of an nmethod. This works as follows:
 * The memory of the code cache is divided into 'segments'. The size of a segment is
 * determined by -XX:CodeCacheSegmentSize=<size>. Allocation in the code cache can only
 * happen at segment boundaries. A pointer in the code cache can be mapped to a segment
 * by calling segment_for(addr). Each time memory is requested from the code cache,
 * the segmap is updated accordingly. See the following example, which illustrates the
 * state of the code cache and the segment map: (seg -> segment, nm -> nmethod)
 *
 *          code cache          segmap
 *         -----------        ---------
 * seg 1   | nm 1    |   ->   | 0     |
 * seg 2   | nm 1    |   ->   | 1     |
 * ...     | nm 1    |   ->   | ..    |
 * seg m   | nm 2    |   ->   | 0     |
 * seg m+1 | nm 2    |   ->   | 1     |
 * ...     | nm 2    |   ->   | 2     |
 * ...     | nm 2    |   ->   | ..    |
 * ...     | nm 2    |   ->   | 0xFE  |
 * seg m+n | nm 2    |   ->   | 1     |
 * ...     | nm 2    |   ->   |       |
 *
 * A value of '0' in the segmap indicates that the segment contains the beginning of
 * an nmethod. Let's walk through a simple example: If we want to find the start of
 * an nmethod that falls into seg 2, we read the value of segmap[2]. The value
 * is an offset that points to the segment that contains the start of the nmethod.
 * Another example: If we want to get the start of nm 2, and we happen to get a pointer
 * that points to seg m+n, we first read segmap[m+n], which returns '1'. So we have to
 * do one more read of segmap[m+n-1] to finally get the segment header.
 */
void* CodeHeap::find_start(void* p) const {
  if (!contains(p)) {
    return NULL;
  }
  size_t seg_idx = segment_for(p);
  address seg_map = (address)_segmap.low();
  if (is_segment_unused(seg_map[seg_idx])) {
    return NULL;
  }
  while (seg_map[seg_idx] > 0) {
    seg_idx -= (int)seg_map[seg_idx];
  }

  HeapBlock* h = block_at(seg_idx);
  if (h->free()) {
    return NULL;
  }
  return h->allocated_space();
}

CodeBlob* CodeHeap::find_blob_unsafe(void* start) const {
  CodeBlob* result = (CodeBlob*)CodeHeap::find_start(start);
  if (result != NULL && result->blob_contains((address)start)) {
    return result;
  }
  return NULL;
}

size_t CodeHeap::alignment_unit() const {
  // this will be a power of two
  return _segment_size;
}


size_t CodeHeap::alignment_offset() const {
  // The lowest address in any allocated block will be
  // equal to alignment_offset (mod alignment_unit).
  return sizeof(HeapBlock) & (_segment_size - 1);
}

// Returns the current block if it is in use.
// If not, it returns the subsequent block (if available), NULL otherwise.
// Free blocks are merged, therefore there is at most one free block
// between two used ones. As a result, the subsequent block (if available) is
// guaranteed to be used.
void* CodeHeap::next_used(HeapBlock* b) const {
  if (b != NULL && b->free()) b = next_block(b);
  assert(b == NULL || !b->free(), "must be in use or at end of heap");
  return (b == NULL) ? NULL : b->allocated_space();
}

// Returns the first block in the heap (used or free), or NULL if
// nothing has been allocated yet.
HeapBlock* CodeHeap::first_block() const {
  if (_next_segment > 0)
    return block_at(0);
  return NULL;
}

HeapBlock* CodeHeap::block_start(void* q) const {
  HeapBlock* b = (HeapBlock*)find_start(q);
  if (b == NULL) return NULL;
  return b - 1;
}

// Returns the block following b in the heap, or NULL if b is the last one.
HeapBlock* CodeHeap::next_block(HeapBlock *b) const {
  if (b == NULL) return NULL;
  size_t i = segment_for(b) + b->length();
  if (i < _next_segment)
    return block_at(i);
  return NULL;
}
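
// The usual way to walk every block, used verbatim by verify() below:
//
//   for (HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
//     if (h->free()) { /* on the freelist */ } else { /* holds a CodeBlob */ }
//   }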

// Returns current capacity
size_t CodeHeap::capacity() const {
  return _memory.committed_size();
}

size_t CodeHeap::max_capacity() const {
  return _memory.reserved_size();
}

int CodeHeap::allocated_segments() const {
  return (int)_next_segment;
}

size_t CodeHeap::allocated_capacity() const {
  // size of used heap - size on freelist
  return segments_to_size(_next_segment - _freelist_segments);
}

// Returns the size of the unallocated part of the heap
size_t CodeHeap::heap_unallocated_capacity() const {
  // Total number of segments - number currently used
  return segments_to_size(_number_of_reserved_segments - _next_segment);
}

// Free list management

// Returns the block that immediately follows b in memory
// (not b's successor on the freelist).
FreeBlock* CodeHeap::following_block(FreeBlock *b) {
  return (FreeBlock*)(((address)b) + _segment_size * b->length());
}

// Inserts block b after a
void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
  assert(a != NULL && b != NULL, "must be real pointers");

  // Link b into the list after a
  b->set_link(a->link());
  a->set_link(b);

  // See if we can merge blocks
  merge_right(b); // Try to make b bigger
  merge_right(a); // Try to make a include b
}

// Try to merge this block with the following block
bool CodeHeap::merge_right(FreeBlock* a) {
  assert(a->free(), "must be a free block");
  if (following_block(a) == a->link()) {
    assert(a->link() != NULL && a->link()->free(), "must be free too");
    // Update block a to include the following block
    a->set_length(a->length() + a->link()->length());
    a->set_link(a->link()->link());
    // Update find_start map
    size_t beg = segment_for(a);
    mark_segmap_as_used(beg, beg + a->length());
    _freelist_length--;
    return true;
  }
  return false;
}
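
// Coalescing sketch: if block B immediately follows block A in memory and both
// end up free, insert_after()/merge_right() fuse them into a single free block
// of length A->length() + B->length(). This is why the freelist never holds
// two adjacent free blocks (the invariant next_used() relies on).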

void CodeHeap::add_to_freelist(HeapBlock* a) {
  FreeBlock* b = (FreeBlock*)a;
  _freelist_length++;

  assert(b != _freelist, "cannot be removed twice");

  // Mark as free and update free space count
  _freelist_segments += b->length();
  b->set_free();

  // First element in list?
  if (_freelist == NULL) {
    _freelist = b;
    b->set_link(NULL);
    return;
  }

  // Since the freelist is ordered (smaller addresses -> larger addresses) and the
  // element we want to insert into the freelist has a smaller address than the first
  // element, we can simply add 'b' as the first element and we are done.
  if (b < _freelist) {
    // Insert first in list
    b->set_link(_freelist);
    _freelist = b;
    merge_right(_freelist);
    return;
  }

  // Scan for the right place to put b into the list.
  // The list is sorted by increasing addresses.
  FreeBlock* prev = _freelist;
  FreeBlock* cur  = _freelist->link();
  while(cur != NULL && cur < b) {
    assert(prev < cur, "Freelist must be ordered");
    prev = cur;
    cur  = cur->link();
  }
  assert((prev < b) && (cur == NULL || b < cur), "free-list must be ordered");
  insert_after(prev, b);
}

/**
 * Search the freelist for the first entry that fits
 * (the list is sorted by address, not by size).
 * @return NULL if no fitting entry was found
 */
FreeBlock* CodeHeap::search_freelist(size_t length) {
  FreeBlock* found_block  = NULL;
  FreeBlock* found_prev   = NULL;
  size_t     found_length = 0;

  FreeBlock* prev = NULL;
  FreeBlock* cur  = _freelist;

  // Search for first block that fits
  while(cur != NULL) {
    if (cur->length() >= length) {
      // Remember block, its previous element, and its length
      found_block  = cur;
      found_prev   = prev;
      found_length = found_block->length();

      break;
    }
    // Next element in list
    prev = cur;
    cur  = cur->link();
  }

  if (found_block == NULL) {
    // None found
    return NULL;
  }

  // Exact (or at least good enough) fit. Remove from list.
  // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
  if (found_length - length < CodeCacheMinBlockLength) {
    _freelist_length--;
    length = found_length;
    if (found_prev == NULL) {
      assert(_freelist == found_block, "sanity check");
      _freelist = _freelist->link();
    } else {
      assert((found_prev->link() == found_block), "sanity check");
      // Unlink element
      found_prev->set_link(found_block->link());
    }
  } else {
    // Truncate the block: shrink the entry that stays on the freelist and
    // hand out its tail. Length and segmap are set on the new (tail) block.
    found_block->set_length(found_length - length);
    found_block = following_block(found_block);

    size_t beg = segment_for(found_block);
    mark_segmap_as_used(beg, beg + length);
    found_block->set_length(length);
  }

  found_block->set_used();
  _freelist_segments -= length;
  return found_block;
}

//----------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

void CodeHeap::print() {
  tty->print_cr("The Heap");
}

void CodeHeap::verify() {
  if (VerifyCodeCache) {
    size_t len = 0;
    int count = 0;
    for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
      len += b->length();
      count++;
      // Check if we have merged all free blocks
      assert(merge_right(b) == false, "Missed merging opportunity");
    }
    // Verify that freelist contains the right amount of free space
    assert(len == _freelist_segments, "wrong freelist");

    // Verify that the number of free blocks is not out of hand
    // (checked here, while 'count' still holds the freelist length).
    static int free_block_threshold = 10000;
    if (count > free_block_threshold) {
      warning("CodeHeap: # of free blocks > %d", free_block_threshold);
      // Double the warning limit
      free_block_threshold *= 2;
    }

    for(HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
      if (h->free()) count--;
    }
    // Verify that the freelist contains the same number of blocks
    // as there are free blocks found on the full list.
    assert(count == 0, "missing free blocks");
  }
}

#endif