/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"

size_t CodeHeap::header_size() {
  return sizeof(HeapBlock);
}


// Implementation of Heap

CodeHeap::CodeHeap(const char* name, const int code_blob_type)
  : _code_blob_type(code_blob_type) {
  _name                         = name;
  _number_of_committed_segments = 0;
  _number_of_reserved_segments  = 0;
  _segment_size                 = 0;
  _log2_segment_size            = 0;
  _next_segment                 = 0;
  _freelist                     = NULL;
  _freelist_segments            = 0;
  _freelist_length              = 0;
  _max_allocated_capacity       = 0;
  _blob_count                   = 0;
  _nmethod_count                = 0;
  _adapter_count                = 0;
  _full_count                   = 0;
}


void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  assert(beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  while (p < q) *p++ = free_sentinel;
}


void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
  assert(beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // setup _segmap pointers for faster indexing
  address p = (address)_segmap.low() + beg;
  address q = (address)_segmap.low() + end;
  // initialize interval
  int i = 0;
  while (p < q) {
    *p++ = i++;
    if (i == free_sentinel) i = 1;
  }
}
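
// Illustrative example (not part of the original sources): after
// mark_segmap_as_used(5, 8) the map reads
//   segmap[5] = 0, segmap[6] = 1, segmap[7] = 2
// i.e. each entry holds the distance (in segments) back to the block header.
// mark_segmap_as_free(5, 8) rewrites all three entries to free_sentinel.
// For blocks longer than free_sentinel segments the counter wraps back to 1,
// so walking back to the header may take more than one hop (see the comment
// above find_start() below).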

static size_t align_to_page_size(size_t size) {
  const size_t alignment = (size_t)os::vm_page_size();
  assert(is_power_of_2(alignment), "no kidding ???");
  return (size + alignment - 1) & ~(alignment - 1);
}


void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}


bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
  assert(rs.size() >= committed_size, "reserved < committed");
  assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  assert(is_power_of_2(segment_size), "segment_size must be a power of 2");

  _segment_size      = segment_size;
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory.
  size_t page_size = os::vm_page_size();
  if (os::can_execute_large_page_memory()) {
    const size_t min_pages = 8;
    page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
                     os::page_size_for_region_aligned(rs.size(), min_pages));
  }

  const size_t granularity = os::vm_allocation_granularity();
  const size_t c_size = align_up(committed_size, page_size);

  os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
    return false;
  }

  on_code_mapping(_memory.low(), _memory.committed_size());
  _number_of_committed_segments = size_to_segments(_memory.committed_size());
  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
  const size_t reserved_segments_size = align_up(_number_of_reserved_segments, reserved_segments_alignment);
  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

  // reserve space for _segmap
  if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
    return false;
  }

  MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);

  assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map");
  assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
  assert(_segmap.reserved_size()  >= _segmap.committed_size()              , "just checking");

  // initialize remaining instance variables
  clear();
  return true;
}
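
// Illustrative sizing example (assuming a segment size of 64 bytes for the
// arithmetic; the actual value comes from -XX:CodeCacheSegmentSize):
// committing 1 MB (1048576 bytes) of code space yields
// size_to_segments(1048576) == 16384 committed segments, and the segment map
// needs one byte per reserved segment, rounded up to a page by
// align_to_page_size().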

bool CodeHeap::expand_by(size_t size) {
  // expand _memory space
  size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
  if (dm > 0) {
    // Use at least the available uncommitted space if 'size' is larger
    if (_memory.uncommitted_size() != 0 && dm > _memory.uncommitted_size()) {
      dm = _memory.uncommitted_size();
    }
    char* base = _memory.low() + _memory.committed_size();
    if (!_memory.expand_by(dm)) return false;
    on_code_mapping(base, dm);
    size_t i = _number_of_committed_segments;
    _number_of_committed_segments = size_to_segments(_memory.committed_size());
    assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
    // expand _segmap space
    size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
    if ((ds > 0) && !_segmap.expand_by(ds)) {
      return false;
    }
    assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
    // initialize additional segmap entries
    mark_segmap_as_free(i, _number_of_committed_segments);
  }
  return true;
}

void CodeHeap::clear() {
  _next_segment = 0;
  mark_segmap_as_free(0, _number_of_committed_segments);
}


void* CodeHeap::allocate(size_t instance_size) {
  size_t number_of_segments = size_to_segments(instance_size + header_size());
  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");

  // First check if we can satisfy request from freelist
  NOT_PRODUCT(verify());
  HeapBlock* block = search_freelist(number_of_segments);
  NOT_PRODUCT(verify());

  if (block != NULL) {
    assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
    assert(!block->free(), "must not be marked free");
    guarantee((char*) block >= _memory.low_boundary() && (char*) block < _memory.high(),
              "The newly allocated block " INTPTR_FORMAT " is not within the heap "
              "starting with " INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
              p2i(block), p2i(_memory.low_boundary()), p2i(_memory.high()));
    DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, instance_size));
    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
    _blob_count++;
    return block->allocated_space();
  }

  // Ensure minimum size for allocation to the heap.
  number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);

  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
    HeapBlock* b = block_at(_next_segment);
    b->initialize(number_of_segments);
    _next_segment += number_of_segments;
    guarantee((char*) b >= _memory.low_boundary() && (char*) b < _memory.high(),
              "The newly allocated block " INTPTR_FORMAT " is not within the heap "
              "starting with " INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
              p2i(b), p2i(_memory.low_boundary()), p2i(_memory.high()));
    DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size));
    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
    _blob_count++;
    return b->allocated_space();
  } else {
    return NULL;
  }
}


void CodeHeap::deallocate(void* p) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
  guarantee((char*) b >= _memory.low_boundary() && (char*) b < _memory.high(),
            "The block to be deallocated " INTPTR_FORMAT " is not within the heap "
            "starting with " INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
            p2i(b), p2i(_memory.low_boundary()), p2i(_memory.high()));
  DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapFreeVal,
             segments_to_size(b->length()) - sizeof(HeapBlock)));
  add_to_freelist(b);
  NOT_PRODUCT(verify());
}
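
// Usage sketch (illustrative only; 'heap' and 'size' are hypothetical names):
//
//   void* p = heap->allocate(size);  // NULL if neither the freelist nor the
//                                    // committed space can satisfy the request
//   if (p != NULL) {
//     // ... emit code into the blob starting at p ...
//     heap->deallocate(p);           // p must be the pointer returned above
//   }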

/**
 * Uses the segment map to find the start (header) of an nmethod. This works as follows:
 * The memory of the code cache is divided into 'segments'. The size of a segment is
 * determined by -XX:CodeCacheSegmentSize=XX. Allocation in the code cache can only
 * happen at segment boundaries. A pointer in the code cache can be mapped to a segment
 * by calling segment_for(addr). Each time memory is requested from the code cache,
 * the segmap is updated accordingly. See the following example, which illustrates the
 * state of the code cache and the segment map: (seg -> segment, nm -> nmethod)
 *
 *          code cache       segmap
 *         -----------      --------
 * seg 1   | nm 1    |  ->  | 0    |
 * seg 2   | nm 1    |  ->  | 1    |
 * ...     | nm 1    |  ->  | ..   |
 * seg m   | nm 2    |  ->  | 0    |
 * seg m+1 | nm 2    |  ->  | 1    |
 * ...     | nm 2    |  ->  | 2    |
 * ...     | nm 2    |  ->  | ..   |
 * ...     | nm 2    |  ->  | 0xFE |
 * seg m+n | nm 2    |  ->  | 1    |
 * ...     | nm 2    |  ->  |      |
 *
 * A value of '0' in the segmap indicates that this segment contains the beginning of
 * an nmethod. Let's walk through a simple example: If we want to find the start of
 * the nmethod that covers seg 2, we read the value of segmap[2]. The value is an
 * offset that points back to the segment that contains the start of the nmethod.
 * Another example: If we want to get the start of nm 2, and we happen to get a pointer
 * that points to seg m+n, we first read segmap[m+n], which returns '1'. We then step
 * back to segmap[m+n-1] and keep subtracting the values we read until we reach a
 * segment whose entry is '0', which is the segment holding the header.
 */
void* CodeHeap::find_start(void* p) const {
  if (!contains(p)) {
    return NULL;
  }
  size_t seg_idx = segment_for(p);
  address seg_map = (address)_segmap.low();
  if (is_segment_unused(seg_map[seg_idx])) {
    return NULL;
  }
  while (seg_map[seg_idx] > 0) {
    seg_idx -= (int)seg_map[seg_idx];
  }

  HeapBlock* h = block_at(seg_idx);
  if (h->free()) {
    return NULL;
  }
  return h->allocated_space();
}

CodeBlob* CodeHeap::find_blob_unsafe(void* start) const {
  CodeBlob* result = (CodeBlob*)CodeHeap::find_start(start);
  if (result != NULL && result->blob_contains((address)start)) {
    return result;
  }
  return NULL;
}
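
// Worked example (illustrative, using the diagram above): given a pointer q
// into seg m+n of nm 2, find_start(q) computes seg_idx = segment_for(q) = m+n,
// reads '1', steps to m+n-1, reads '0xFE', subtracts again, and so on until it
// reaches the segment whose entry is '0'. block_at() of that segment is the
// HeapBlock header, and its allocated_space() (the value returned) is the
// start of nm 2.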

size_t CodeHeap::alignment_unit() const {
  // this will be a power of two
  return _segment_size;
}


size_t CodeHeap::alignment_offset() const {
  // The lowest address in any allocated block will be
  // equal to alignment_offset (mod alignment_unit).
  return sizeof(HeapBlock) & (_segment_size - 1);
}

// Returns the current block if available and used.
// If not, it returns the subsequent block (if available), NULL otherwise.
// Free blocks are merged, therefore there is at most one free block
// between two used ones. As a result, the subsequent block (if available) is
// guaranteed to be used.
void* CodeHeap::next_used(HeapBlock* b) const {
  if (b != NULL && b->free()) b = next_block(b);
  assert(b == NULL || !b->free(), "must be in use or at end of heap");
  return (b == NULL) ? NULL : b->allocated_space();
}

// Returns the first used HeapBlock
HeapBlock* CodeHeap::first_block() const {
  if (_next_segment > 0)
    return block_at(0);
  return NULL;
}

HeapBlock* CodeHeap::block_start(void* q) const {
  HeapBlock* b = (HeapBlock*)find_start(q);
  if (b == NULL) return NULL;
  return b - 1;
}

// Returns the block following 'b', or NULL if 'b' is the last block in the heap
HeapBlock* CodeHeap::next_block(HeapBlock *b) const {
  if (b == NULL) return NULL;
  size_t i = segment_for(b) + b->length();
  if (i < _next_segment)
    return block_at(i);
  return NULL;
}


// Returns current capacity
size_t CodeHeap::capacity() const {
  return _memory.committed_size();
}

size_t CodeHeap::max_capacity() const {
  return _memory.reserved_size();
}

int CodeHeap::allocated_segments() const {
  return (int)_next_segment;
}

size_t CodeHeap::allocated_capacity() const {
  // size of used heap - size on freelist
  return segments_to_size(_next_segment - _freelist_segments);
}

// Returns size of the unallocated heap block
size_t CodeHeap::heap_unallocated_capacity() const {
  // Total number of segments - number currently used
  return segments_to_size(_number_of_reserved_segments - _next_segment);
}

// Free list management

FreeBlock* CodeHeap::following_block(FreeBlock *b) {
  return (FreeBlock*)(((address)b) + _segment_size * b->length());
}

// Inserts block b after a
void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
  assert(a != NULL && b != NULL, "must be real pointers");

  // Link b into the list after a
  b->set_link(a->link());
  a->set_link(b);

  // See if we can merge blocks
  merge_right(b); // Try to make b bigger
  merge_right(a); // Try to make a include b
}

// Try to merge this block with the following block
bool CodeHeap::merge_right(FreeBlock* a) {
  assert(a->free(), "must be a free block");
  if (following_block(a) == a->link()) {
    assert(a->link() != NULL && a->link()->free(), "must be free too");
    // Update block a to include the following block
    a->set_length(a->length() + a->link()->length());
    a->set_link(a->link()->link());
    // Update find_start map
    size_t beg = segment_for(a);
    mark_segmap_as_used(beg, beg + a->length());
    _freelist_length--;
    return true;
  }
  return false;
}
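
// Illustrative example (not part of the original sources): when a block B is
// freed and lies, address-wise, between free blocks A and C, add_to_freelist(B)
// links it in via insert_after(A, B):
//   before:  A -> C            (freelist is ordered by address, A < B < C)
//   linked:  A -> B -> C
// insert_after() then calls merge_right(B) and merge_right(A); if the blocks
// are adjacent in memory, this collapses all three into one free block, which
// is why there is never more than one free block between two used ones.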

void CodeHeap::add_to_freelist(HeapBlock* a) {
  FreeBlock* b = (FreeBlock*)a;
  _freelist_length++;

  assert(b != _freelist, "cannot be removed twice");

  // Mark as free and update free space count
  _freelist_segments += b->length();
  b->set_free();

  // First element in list?
  if (_freelist == NULL) {
    _freelist = b;
    b->set_link(NULL);
    return;
  }

  // Since the freelist is ordered (smaller addresses -> larger addresses) and the
  // element we want to insert into the freelist has a smaller address than the first
  // element, we can simply add 'b' as the first element and we are done.
  if (b < _freelist) {
    // Insert first in list
    b->set_link(_freelist);
    _freelist = b;
    merge_right(_freelist);
    return;
  }

  // Scan for the right place to put 'b' into the list.
  // The list is sorted by increasing addresses.
  FreeBlock* prev = _freelist;
  FreeBlock* cur  = _freelist->link();
  while(cur != NULL && cur < b) {
    assert(prev < cur, "Freelist must be ordered");
    prev = cur;
    cur  = cur->link();
  }
  assert((prev < b) && (cur == NULL || b < cur), "free-list must be ordered");
  insert_after(prev, b);
}

/**
 * Search the freelist for the first entry that is large enough to satisfy
 * the request (first fit).
 * @return NULL, if no entry was found
 */
FreeBlock* CodeHeap::search_freelist(size_t length) {
  FreeBlock* found_block  = NULL;
  FreeBlock* found_prev   = NULL;
  size_t     found_length = 0;

  FreeBlock* prev = NULL;
  FreeBlock* cur  = _freelist;

  // Search for first block that fits
  while(cur != NULL) {
    if (cur->length() >= length) {
      // Remember block, its previous element, and its length
      found_block  = cur;
      found_prev   = prev;
      found_length = found_block->length();

      break;
    }
    // Next element in list
    prev = cur;
    cur  = cur->link();
  }

  if (found_block == NULL) {
    // None found
    return NULL;
  }

  // Exact (or at least good enough) fit. Remove from list.
  // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
  if (found_length - length < CodeCacheMinBlockLength) {
    _freelist_length--;
    length = found_length;
    if (found_prev == NULL) {
      assert(_freelist == found_block, "sanity check");
      _freelist = _freelist->link();
    } else {
      assert((found_prev->link() == found_block), "sanity check");
      // Unmap element
      found_prev->set_link(found_block->link());
    }
  } else {
    // Truncate block and return a pointer to the following block
    // Set used bit and length on new block
    found_block->set_length(found_length - length);
    found_block = following_block(found_block);

    size_t beg = segment_for(found_block);
    mark_segmap_as_used(beg, beg + length);
    found_block->set_length(length);
  }

  found_block->set_used();
  _freelist_segments -= length;
  return found_block;
}

//----------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

void CodeHeap::print() {
  tty->print_cr("The Heap");
}

void CodeHeap::verify() {
  if (VerifyCodeCache) {
    size_t len = 0;
    int count = 0;
    for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
      len += b->length();
      count++;
      // Check if we have merged all free blocks
      assert(merge_right(b) == false, "Missed merging opportunity");
    }
    // Verify that freelist contains the right amount of free space
    assert(len == _freelist_segments, "wrong freelist");

    // Verify that the number of free blocks is not out of hand
    // (checked here, while 'count' still holds the number of freelist entries).
    static int free_block_threshold = 10000;
    if (count > free_block_threshold) {
      warning("CodeHeap: # of free blocks > %d", free_block_threshold);
      // Double the warning limit
      free_block_threshold *= 2;
    }

    for(HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
      if (h->free()) count--;
    }
    // Verify that the freelist contains the same number of blocks
    // as there are free blocks found on the full list.
    assert(count == 0, "missing free blocks");
  }
}

#endif