/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"

size_t CodeHeap::header_size() {
  return sizeof(HeapBlock);
}


// Implementation of Heap

CodeHeap::CodeHeap(const char* name, const int code_blob_type)
  : _code_blob_type(code_blob_type) {
  _name                         = name;
  _number_of_committed_segments = 0;
  _number_of_reserved_segments  = 0;
  _segment_size                 = 0;
  _log2_segment_size            = 0;
  _next_segment                 = 0;
  _freelist                     = NULL;
  _last_insert_point            = NULL;
  _freelist_segments            = 0;
  _freelist_length              = 0;
  _max_allocated_capacity       = 0;
  _blob_count                   = 0;
  _nmethod_count                = 0;
  _adapter_count                = 0;
  _full_count                   = 0;
  _fragmentation_count          = 0;
}

// Dummy initialization of template array.
char CodeHeap::segmap_template[] = {0};

// This template array is used to (re)initialize the segmap,
// replacing a 1..254 loop.
void CodeHeap::init_segmap_template() {
  assert(free_sentinel == 255, "Segment map logic changed!");
  for (int i = 0; i <= free_sentinel; i++) {
    segmap_template[i] = i;
  }
}

// The segmap is marked free for that part of the heap
// which has not been allocated yet (beyond _next_segment).
// The range of segments to be marked is given by [beg..end).
// "Allocated" space in this context means there exists a
// HeapBlock or a FreeBlock describing this space.
// This method takes segment map indices as range boundaries
void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  assert(             beg <  _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end   out of bounds");
  // Don't do unpredictable things in PRODUCT build
  if (beg < end) {
    // setup _segmap pointers for faster indexing
    address p = (address)_segmap.low() + beg;
    address q = (address)_segmap.low() + end;
    // initialize interval
    memset(p, free_sentinel, q-p);
  }
}
// Don't get confused here.
// All existing blocks, no matter if they are used() or free(),
// have their segmap marked as used. This allows finding the
// block header (HeapBlock or FreeBlock) for any pointer
// within the allocated range (upper limit: _next_segment).
// This method takes segment map indices as range boundaries.
// The range of segments to be marked is given by [beg..end).
void CodeHeap::mark_segmap_as_used(size_t beg, size_t end, bool is_FreeBlock_join) {
  assert(             beg <  _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end   out of bounds");
  // Don't do unpredictable things in PRODUCT build
  if (beg < end) {
    // setup _segmap pointers for faster indexing
    address p = (address)_segmap.low() + beg;
    address q = (address)_segmap.low() + end;
    // initialize interval
    // If we are joining two free blocks, the segmap range for each
    // block is consistent. To create a consistent segmap range for
    // the blocks combined, we have three choices:
    //  1 - Do a full init from beg to end. Not very efficient because
    //      the segmap range for the left block is potentially initialized
    //      over and over again.
    //  2 - Carry over the last segmap element value of the left block
    //      and initialize the segmap range of the right block starting
    //      with that value. Saves initializing the left block's segmap
    //      over and over again. Very efficient if FreeBlocks mostly
    //      are appended to the right.
    //  3 - Take full advantage of the segmap being almost correct with
    //      the two blocks combined. Let's assume the left block consists
    //      of m segments. Then the segmap looks like
    //        ... (m-2) (m-1) 0  1  2  3 ...
    //      By substituting the '0' by '1', we create a valid, but
    //      suboptimal, segmap range covering the two blocks combined.
    //      This introduces an extra hop for the find_block_for() iteration.
    //
    // When this method is called with is_FreeBlock_join == true, the
    // segmap index beg must select the first segment of the right block.
    // Otherwise, it has to select the first segment of the left block.
    // Variant 3 is used for all FreeBlock joins.
    if (is_FreeBlock_join && (beg > 0)) {
#ifndef PRODUCT
      FreeBlock* pBlock = (FreeBlock*)block_at(beg);
      assert(beg + pBlock->length() == end, "Internal error: (%d - %d) != %d", (unsigned int)end, (unsigned int)beg, (unsigned int)(pBlock->length()));
      assert(*p == 0, "Begin index does not select a block start segment, *p = %2.2x", *p);
#endif
      // If possible, extend the previous hop.
      if (*(p-1) < (free_sentinel-1)) {
        *p = *(p-1) + 1;
      } else {
        *p = 1;
      }
      if (_fragmentation_count++ >= fragmentation_limit) {
        defrag_segmap(true);
        _fragmentation_count = 0;
      }
    } else {
      size_t n_bulk = free_sentinel-1; // bulk processing uses template indices [1..254].
      // Use shortcut for blocks <= 255 segments.
      // Special case bulk processing: [0..254].
      if ((end - beg) <= n_bulk) {
        memcpy(p, &segmap_template[0], end - beg);
      } else {
        *p++  = 0;  // block header marker
        while (p < q) {
          if ((p+n_bulk) <= q) {
            memcpy(p, &segmap_template[1], n_bulk);
            p += n_bulk;
          } else {
            memcpy(p, &segmap_template[1], q-p);
            p = q;
          }
        }
      }
    }
  }
}

void CodeHeap::invalidate(size_t beg, size_t end, size_t hdr_size) {
#ifndef PRODUCT
  // Fill the given range with some bad value.
  // length is expected to be in segment_size units.
  // This prevents inadvertent execution of code leftover from previous use.
  char* p = low_boundary() + segments_to_size(beg) + hdr_size;
  memset(p, badCodeHeapNewVal, segments_to_size(end-beg)-hdr_size);
#endif
}

void CodeHeap::clear(size_t beg, size_t end) {
  mark_segmap_as_free(beg, end);
  invalidate(beg, end, 0);
}

void CodeHeap::clear() {
  _next_segment = 0;
  clear(_next_segment, _number_of_committed_segments);
}


static size_t align_to_page_size(size_t size) {
  const size_t alignment = (size_t)os::vm_page_size();
  assert(is_power_of_2(alignment), "no kidding ???");
  return (size + alignment - 1) & ~(alignment - 1);
}


void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}


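// Attach the heap to the given reserved space: commit 'committed_size' bytes of
// it up front, record the segment granularity, and set up a matching segment map.
// Returns false if either the heap memory or the segment map cannot be initialized.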
bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
  assert(rs.size() >= committed_size, "reserved < committed");
  assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  assert(is_power_of_2(segment_size), "segment_size must be a power of 2");

  _segment_size      = segment_size;
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory.
  size_t page_size = os::vm_page_size();
  if (os::can_execute_large_page_memory()) {
    const size_t min_pages = 8;
    page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
                     os::page_size_for_region_aligned(rs.size(), min_pages));
  }

  const size_t granularity = os::vm_allocation_granularity();
  const size_t c_size = align_up(committed_size, page_size);

  os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
    return false;
  }

  on_code_mapping(_memory.low(), _memory.committed_size());
  _number_of_committed_segments = size_to_segments(_memory.committed_size());
  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
  const size_t reserved_segments_size = align_up(_number_of_reserved_segments, reserved_segments_alignment);
  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

  // reserve space for _segmap
  if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
    return false;
  }

  MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);

  assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit  enough space for segment map");
  assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
  assert(_segmap.reserved_size()  >= _segmap.committed_size()     , "just checking");

  // initialize remaining instance variables, heap memory and segmap
  clear();
  init_segmap_template();
  return true;
}


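// Grow the committed part of the heap by 'size' bytes (rounded up to page size,
// capped at the remaining uncommitted space), commit matching segment map space,
// and initialize the newly committed range.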
bool CodeHeap::expand_by(size_t size) {
  // expand _memory space
  size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
  if (dm > 0) {
    // Use at most the available uncommitted space if 'size' is larger
    if (_memory.uncommitted_size() != 0 && dm > _memory.uncommitted_size()) {
      dm = _memory.uncommitted_size();
    }
    char* base = _memory.low() + _memory.committed_size();
    if (!_memory.expand_by(dm)) return false;
    on_code_mapping(base, dm);
    size_t i = _number_of_committed_segments;
    _number_of_committed_segments = size_to_segments(_memory.committed_size());
    assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
    // expand _segmap space
    size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
    if ((ds > 0) && !_segmap.expand_by(ds)) {
      return false;
    }
    assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
    // initialize additional space (heap memory and segmap)
    clear(i, _number_of_committed_segments);
  }
  return true;
}


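// Allocate a block large enough for 'instance_size' bytes plus the block header.
// The freelist is searched first; if that fails, the block is carved from the
// not-yet-allocated part of the committed heap. Returns a pointer past the block
// header, or NULL if no committed space is left.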
void* CodeHeap::allocate(size_t instance_size) {
  size_t number_of_segments = size_to_segments(instance_size + header_size());
  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");

  // First check if we can satisfy request from freelist
  NOT_PRODUCT(verify());
  HeapBlock* block = search_freelist(number_of_segments);
  NOT_PRODUCT(verify());

  if (block != NULL) {
    assert(!block->free(), "must not be marked free");
    guarantee((char*) block >= _memory.low_boundary() && (char*) block < _memory.high(),
              "The newly allocated block " INTPTR_FORMAT " is not within the heap "
              "starting with "  INTPTR_FORMAT " and ending with "  INTPTR_FORMAT,
              p2i(block), p2i(_memory.low_boundary()), p2i(_memory.high()));
    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
    _blob_count++;
    return block->allocated_space();
  }

  // Ensure minimum size for allocation to the heap.
  number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);

  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments, false);
    block = block_at(_next_segment);
    block->initialize(number_of_segments);
    _next_segment += number_of_segments;
    guarantee((char*) block >= _memory.low_boundary() && (char*) block < _memory.high(),
              "The newly allocated block " INTPTR_FORMAT " is not within the heap "
              "starting with "  INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
              p2i(block), p2i(_memory.low_boundary()), p2i(_memory.high()));
    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
    _blob_count++;
    return block->allocated_space();
  } else {
    return NULL;
  }
}

// Split the given block into two at the given segment.
// This is helpful when a block was allocated too large
// to trim off the unused space at the end (interpreter).
// It also helps with splitting a large free block during allocation.
// Usage state (used or free) must be set by caller since
// we don't know if the resulting blocks will be used or free.
// split_at is the segment number (relative to segment_for(b))
//          where the split happens. The segment with relative
//          number split_at is the first segment of the split-off block.
HeapBlock* CodeHeap::split_block(HeapBlock *b, size_t split_at) {
  if (b == NULL) return NULL;
  // After the split, both blocks must have a size of at least CodeCacheMinBlockLength
  assert((split_at >= CodeCacheMinBlockLength) && (split_at + CodeCacheMinBlockLength <= b->length()),
         "split position(%d) out of range [0..%d]", (int)split_at, (int)b->length());
  size_t split_segment = segment_for(b) + split_at;
  size_t b_size        = b->length();
  size_t newb_size     = b_size - split_at;

  HeapBlock* newb = block_at(split_segment);
  newb->set_length(newb_size);
  mark_segmap_as_used(segment_for(newb), segment_for(newb) + newb_size, false);
  b->set_length(split_at);
  return newb;
}

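// Release the unused tail of a block that was allocated larger than needed.
// 'p' must point just past the block header (as returned by allocate()), and
// 'used_size' is the number of payload bytes to keep. The split-off tail is
// returned to the freelist.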
void CodeHeap::deallocate_tail(void* p, size_t used_size) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");

  size_t actual_number_of_segments = b->length();
  size_t used_number_of_segments   = size_to_segments(used_size + header_size());
  size_t unused_number_of_segments = actual_number_of_segments - used_number_of_segments;
  guarantee(used_number_of_segments <= actual_number_of_segments, "Must be!");

  HeapBlock* f = split_block(b, used_number_of_segments);
  add_to_freelist(f);
  NOT_PRODUCT(verify());
}

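// Return a complete block to the freelist. 'p' must point just past the block
// header, i.e. it must have been returned by a previous allocate() call.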
void CodeHeap::deallocate(void* p) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
  guarantee((char*) b >= _memory.low_boundary() && (char*) b < _memory.high(),
            "The block to be deallocated " INTPTR_FORMAT " is not within the heap "
            "starting with "  INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
            p2i(b), p2i(_memory.low_boundary()), p2i(_memory.high()));
  add_to_freelist(b);
  NOT_PRODUCT(verify());
}

/**
 * The segment map is used to quickly find the start (header) of a
 * code block (e.g. nmethod) when only a pointer to a location inside the
 * code block is known. This works as follows:
 *  - The storage reserved for the code heap is divided into 'segments'.
 *  - The size of a segment is determined by -XX:CodeCacheSegmentSize=<#bytes>.
 *  - The size must be a power of two to allow the use of shift operations
 *    to quickly convert between segment index and segment address.
 *  - Segment start addresses should be aligned to be multiples of CodeCacheSegmentSize.
 *  - It seems beneficial for CodeCacheSegmentSize to be equal to os::page_size().
 *  - Allocation in the code cache can only happen at segment start addresses.
 *  - Allocation in the code cache is in units of CodeCacheSegmentSize.
 *  - A pointer in the code cache can be mapped to a segment by calling
 *    segment_for(addr).
 *  - The segment map is a byte array where array element [i] is related
 *    to the i-th segment in the code heap.
 *  - Each time memory is allocated/deallocated from the code cache,
 *    the segment map is updated accordingly.
 *    Note: deallocation does not cause the memory to become "free", as
 *          indicated by the segment map state "free_sentinel". Deallocation
 *          just changes the block state from "used" to "free".
 *  - Elements of the segment map (byte) array are interpreted
 *    as unsigned integer.
 *  - Element values normally identify an offset backwards (in segment
 *    size units) from the associated segment towards the start of
 *    the block.
 *  - Some values have a special meaning:
 *       0 - This segment is the start of a block (HeapBlock or FreeBlock).
 *     255 - The free_sentinel value. This is a free segment, i.e. it is
 *           not yet allocated and thus does not belong to any block.
 *  - The value of the current element has to be subtracted from the
 *    current index to get closer to the start.
 *  - If the value of the then current element is zero, the block start
 *    segment is found and iteration stops. Otherwise, start over with the
 *    previous step.
 *
 *    The following example illustrates a possible state of code cache
 *    and the segment map: (seg -> segment, nm -> nmethod)
 *
 *          code cache          segmap
 *         -----------        ---------
 * seg 1   | nm 1    |   ->   | 0     |
 * seg 2   | nm 1    |   ->   | 1     |
 * ...     | nm 1    |   ->   | ..    |
 * seg m-1 | nm 1    |   ->   | m-2   |
 * seg m   | nm 2    |   ->   | 0     |
 * seg m+1 | nm 2    |   ->   | 1     |
 * ...     | nm 2    |   ->   | 2     |
 * ...     | nm 2    |   ->   | ..    |
 * ...     | nm 2    |   ->   | 0xFE  | (free_sentinel-1)
 * ...     | nm 2    |   ->   | 1     |
 * seg m+n | nm 2    |   ->   | 2     |
 * ...     | nm 2    |   ->   |       |
 *
 * How to read:
 * A value of '0' in the segmap indicates that this segment contains the
 * beginning of a CodeHeap block. Let's walk through a simple example:
 *
 * We want to find the start of the block that contains nm 1, and we are
 * given a pointer that points into segment m-2. We then read the value
 * of segmap[m-2]. The value is an offset that points to the segment
 * which contains the start of the block.
 *
 * Another example: We want to locate the start of nm 2, and we happen to
 * get a pointer that points into seg m+n. We first read segmap[m+n], which
 * returns '2'. So we have to update our segment map index (ix -= segmap[m+n])
 * and start over.
 */

// Find block which contains the passed pointer,
// regardless of the block being used or free.
// NULL is returned if anything invalid is detected.
void* CodeHeap::find_block_for(void* p) const {
  // Check the pointer to be in committed range.
  if (!contains(p)) {
    return NULL;
  }

  address seg_map = (address)_segmap.low();
  size_t  seg_idx = segment_for(p);

  // This may happen in special cases. Just ignore.
  // Example: PPC ICache stub generation.
  if (is_segment_unused(seg_map[seg_idx])) {
    return NULL;
  }

  // Iterate the segment map chain to find the start of the block.
  while (seg_map[seg_idx] > 0) {
    // Don't check each segment index to refer to a used segment.
    // This method is called extremely often. Therefore, any checking
    // has a significant impact on performance. Rely on CodeHeap::verify()
    // to do the job on request.
    seg_idx -= (int)seg_map[seg_idx];
  }

  return address_for(seg_idx);
}

// Find block which contains the passed pointer.
// The block must be used, i.e. must not be a FreeBlock.
// Return a pointer that points past the block header.
void* CodeHeap::find_start(void* p) const {
  HeapBlock* h = (HeapBlock*)find_block_for(p);
  return ((h == NULL) || h->free()) ? NULL : h->allocated_space();
}

// Find block which contains the passed pointer.
// Same as find_start(p), but with additional safety net.
CodeBlob* CodeHeap::find_blob_unsafe(void* start) const {
  CodeBlob* result = (CodeBlob*)CodeHeap::find_start(start);
  return (result != NULL && result->blob_contains((address)start)) ? result : NULL;
}

size_t CodeHeap::alignment_unit() const {
  // this will be a power of two
  return _segment_size;
}


size_t CodeHeap::alignment_offset() const {
  // The lowest address in any allocated block will be
  // equal to alignment_offset (mod alignment_unit).
  return sizeof(HeapBlock) & (_segment_size - 1);
}

// Returns the current block if available and used.
// If not, it returns the subsequent block (if available), NULL otherwise.
// Free blocks are merged, therefore there is at most one free block
// between two used ones. As a result, the subsequent block (if available) is
// guaranteed to be used.
// The returned pointer points past the block header.
void* CodeHeap::next_used(HeapBlock* b) const {
  if (b != NULL && b->free()) b = next_block(b);
  assert(b == NULL || !b->free(), "must be in use or at end of heap");
  return (b == NULL) ? NULL : b->allocated_space();
}

// Returns the first used HeapBlock
// The returned pointer points to the block header.
HeapBlock* CodeHeap::first_block() const {
  if (_next_segment > 0)
    return block_at(0);
  return NULL;
}

// The returned pointer points to the block header.
HeapBlock* CodeHeap::block_start(void* q) const {
  HeapBlock* b = (HeapBlock*)find_start(q);
  if (b == NULL) return NULL;
  return b - 1;
}

// Returns the next Heap block.
// The returned pointer points to the block header.
HeapBlock* CodeHeap::next_block(HeapBlock *b) const {
  if (b == NULL) return NULL;
  size_t i = segment_for(b) + b->length();
  if (i < _next_segment)
    return block_at(i);
  return NULL;
}


// Returns current capacity
size_t CodeHeap::capacity() const {
  return _memory.committed_size();
}

size_t CodeHeap::max_capacity() const {
  return _memory.reserved_size();
}

int CodeHeap::allocated_segments() const {
  return (int)_next_segment;
}

size_t CodeHeap::allocated_capacity() const {
  // size of used heap - size on freelist
  return segments_to_size(_next_segment - _freelist_segments);
}

// Returns size of the unallocated heap block
size_t CodeHeap::heap_unallocated_capacity() const {
  // Total number of segments - number currently used
  return segments_to_size(_number_of_reserved_segments - _next_segment);
}

// Free list management

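// Compute the block that immediately follows 'b' in memory.
// Note: the result is not necessarily a FreeBlock; callers have to check.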
FreeBlock* CodeHeap::following_block(FreeBlock *b) {
  return (FreeBlock*)(((address)b) + _segment_size * b->length());
}

// Inserts block b after a
void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
  assert(a != NULL && b != NULL, "must be real pointers");

  // Link b into the list after a
  b->set_link(a->link());
  a->set_link(b);

  // See if we can merge blocks
  merge_right(b); // Try to make b bigger
  merge_right(a); // Try to make a include b
}

// Try to merge this block with the following block
bool CodeHeap::merge_right(FreeBlock* a) {
  assert(a->free(), "must be a free block");
  if (following_block(a) == a->link()) {
    assert(a->link() != NULL && a->link()->free(), "must be free too");

    // Remember linked (following) block. invalidate() should only zap the header of this block.
    size_t follower = segment_for(a->link());
    // Merge block a to include the following block.
    a->set_length(a->length() + a->link()->length());
    a->set_link(a->link()->link());

    // Update the segment map and invalidate block contents.
    mark_segmap_as_used(follower, segment_for(a) + a->length(), true);
    // Block contents have already been invalidated by add_to_freelist.
    // What's left is the header of the following block which now is
    // in the middle of the merged block. Just zap one segment.
    invalidate(follower, follower + 1, 0);

    _freelist_length--;
    return true;
  }
  return false;
}


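// Turn the given block into a FreeBlock, insert it into the address-ordered
// freelist, and merge it with adjacent free neighbors where possible.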
void CodeHeap::add_to_freelist(HeapBlock* a) {
  FreeBlock* b = (FreeBlock*)a;
  size_t  bseg = segment_for(b);
  _freelist_length++;

  _blob_count--;
  assert(_blob_count >= 0, "sanity");

  assert(b != _freelist, "cannot be removed twice");

  // Mark as free and update free space count
  _freelist_segments += b->length();
  b->set_free();
  invalidate(bseg, bseg + b->length(), sizeof(FreeBlock));

  // First element in list?
  if (_freelist == NULL) {
    b->set_link(NULL);
    _freelist = b;
    return;
  }

  // Since the freelist is ordered (smaller addresses -> larger addresses) and the
  // element we want to insert into the freelist has a smaller address than the first
  // element, we can simply add 'b' as the first element and we are done.
  if (b < _freelist) {
    // Insert first in list
    b->set_link(_freelist);
    _freelist = b;
    merge_right(_freelist);
    return;
  }

  // Scan for right place to put into list.
  // List is sorted by increasing addresses.
  FreeBlock* prev = _freelist;
  FreeBlock* cur  = _freelist->link();
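  // For long freelists, try to resume the scan at the last insertion point
  // rather than at the list head. The shortcut is only taken if that point
  // still refers to a free block located below 'b'.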
  if ((_freelist_length > freelist_limit) && (_last_insert_point != NULL)) {
    _last_insert_point = (FreeBlock*)find_block_for(_last_insert_point);
    if ((_last_insert_point != NULL) && _last_insert_point->free() && (_last_insert_point < b)) {
      prev = _last_insert_point;
      cur  = prev->link();
    }
  }
  while(cur != NULL && cur < b) {
    assert(prev < cur, "Freelist must be ordered");
    prev = cur;
    cur  = cur->link();
  }
  assert((prev < b) && (cur == NULL || b < cur), "free-list must be ordered");
  insert_after(prev, b);
  _last_insert_point = prev;
}

 662 
 663 /**
 664  * Search freelist for an entry on the list with the best fit.
 665  * @return NULL, if no one was found
 666  */
 667 HeapBlock* CodeHeap::search_freelist(size_t length) {
 668   FreeBlock* found_block  = NULL;
 669   FreeBlock* found_prev   = NULL;
 670   size_t     found_length = _next_segment; // max it out to begin with
 671 
 672   HeapBlock* res  = NULL;
 673   FreeBlock* prev = NULL;
 674   FreeBlock* cur  = _freelist;
 675 
 676   length = length < CodeCacheMinBlockLength ? CodeCacheMinBlockLength : length;
 677 
 678   // Search for best-fitting block
 679   while(cur != NULL) {
 680     size_t cur_length = cur->length();
 681     if (cur_length == length) {
 682       // We have a perfect fit
 683       found_block  = cur;
 684       found_prev   = prev;
 685       found_length = cur_length;
 686       break;
 687     } else if ((cur_length > length) && (cur_length < found_length)) {
 688       // This is a new, closer fit. Remember block, its previous element, and its length
 689       found_block  = cur;
 690       found_prev   = prev;
 691       found_length = cur_length;
 692     }
 693     // Next element in list
 694     prev = cur;
 695     cur  = cur->link();
 696   }
 697 
 698   if (found_block == NULL) {
 699     // None found
 700     return NULL;
 701   }
 702 
 703   // Exact (or at least good enough) fit. Remove from list.
 704   // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
 705   if (found_length - length < CodeCacheMinBlockLength) {
 706     _freelist_length--;
 707     length = found_length;
 708     if (found_prev == NULL) {
 709       assert(_freelist == found_block, "sanity check");
 710       _freelist = _freelist->link();
 711     } else {
 712       assert((found_prev->link() == found_block), "sanity check");
 713       // Unmap element
 714       found_prev->set_link(found_block->link());
 715     }
 716     res = (HeapBlock*)found_block;
 717     // sizeof(HeapBlock) < sizeof(FreeBlock).
 718     // Invalidate the additional space that FreeBlock occupies.
 719     // The rest of the block should already be invalidated.
 720     // This is necessary due to a dubious assert in nmethod.cpp(PcDescCache::reset_to()).
 721     // Can't use invalidate() here because it works on segment_size units (too coarse).
 722     DEBUG_ONLY(memset((void*)res->allocated_space(), badCodeHeapNewVal, sizeof(FreeBlock) - sizeof(HeapBlock)));
 723   } else {
 724     // Truncate the free block and return the truncated part
 725     // as new HeapBlock. The remaining free block does not
 726     // need to be updated, except for it's length. Truncating
 727     // the segment map does not invalidate the leading part.
 728     res = split_block(found_block, found_length - length);
 729   }
 730 
 731   res->set_used();
 732   _freelist_segments -= length;
 733   return res;
 734 }
 735 
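// Walk all blocks and count the extra segmap hops introduced by the
// FreeBlock join optimization in mark_segmap_as_used(). If do_defrag is true,
// rewrite the segmap of each affected block back to the canonical encoding.
// Returns the total number of extra hops found.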
int CodeHeap::defrag_segmap(bool do_defrag) {
  int extra_hops_used = 0;
  int extra_hops_free = 0;
  int blocks_used     = 0;
  int blocks_free     = 0;
  for(HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
    size_t beg = segment_for(h);
    size_t end = segment_for(h) + h->length();
    int extra_hops = segmap_hops(beg, end);
    if (h->free()) {
      extra_hops_free += extra_hops;
      blocks_free++;
    } else {
      extra_hops_used += extra_hops;
      blocks_used++;
    }
    if (do_defrag && (extra_hops > 0)) {
      mark_segmap_as_used(beg, end, false);
    }
  }
  return extra_hops_used + extra_hops_free;
}

// Count the hops required to get from the last segment of a
// heap block to the block header segment. For the optimal case,
//   #hops = ((#segments-1)+(free_sentinel-2))/(free_sentinel-1)
// The range of segments to be checked is given by [beg..end).
// Return the number of extra hops required. There may be extra hops
// due to the is_FreeBlock_join optimization in mark_segmap_as_used().
int CodeHeap::segmap_hops(size_t beg, size_t end) {
  if (beg < end) {
    // setup _segmap pointers for faster indexing
    address p = (address)_segmap.low() + beg;
    int hops_expected = (int)(((end-beg-1)+(free_sentinel-2))/(free_sentinel-1));
    int nhops = 0;
    size_t ix = end-beg-1;
    while (p[ix] > 0) {
      ix -= p[ix];
      nhops++;
    }
    return (nhops > hops_expected) ? nhops - hops_expected : 0;
  }
  return 0;
}

//----------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

void CodeHeap::print() {
  tty->print_cr("The Heap");
}

void CodeHeap::verify() {
  if (VerifyCodeCache) {
    size_t len = 0;
    int count = 0;
    for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
      len += b->length();
      count++;
      // Check if we have merged all free blocks
      assert(merge_right(b) == false, "Missed merging opportunity");
    }
    // Verify that freelist contains the right amount of free space
    assert(len == _freelist_segments, "wrong freelist");

    for(HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
      if (h->free()) count--;
    }
    // Verify that the freelist contains the same number of blocks
    // as the number of free blocks found on the full list.
    assert(count == 0, "missing free blocks");

    //---<  all free block memory must have been invalidated  >---
    for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
      for (char* c = (char*)b + sizeof(FreeBlock); c < (char*)b + segments_to_size(b->length()); c++) {
        assert(*c == (char)badCodeHeapNewVal, "FreeBlock@" PTR_FORMAT "(" PTR_FORMAT ") not invalidated @byte %d", p2i(b), b->length(), (int)(c - (char*)b));
      }
    }

    address seg_map = (address)_segmap.low();
    size_t  nseg       = 0;
    int     extra_hops = 0;
    count = 0;
    for(HeapBlock* b = first_block(); b != NULL; b = next_block(b)) {
      size_t seg1 = segment_for(b);
      size_t segn = seg1 + b->length();
      extra_hops += segmap_hops(seg1, segn);
      count++;
      for (size_t i = seg1; i < segn; i++) {
        nseg++;
        //---<  Verify segment map marking  >---
        // All allocated segments, no matter if in a free or used block,
        // must be marked "in use".
        assert(!is_segment_unused(seg_map[i]), "CodeHeap: unused segment. seg_map[%d]([%d..%d]) = %d, %s block",    (int)i, (int)seg1, (int)segn, seg_map[i], b->free()? "free":"used");
        assert((unsigned char)seg_map[i] < free_sentinel, "CodeHeap: seg_map[%d]([%d..%d]) = %d (out of range)",    (int)i, (int)seg1, (int)segn, seg_map[i]);
      }
    }
    assert(nseg == _next_segment, "CodeHeap: segment count mismatch. found %d, expected %d.", (int)nseg, (int)_next_segment);
    assert((count == 0) || (extra_hops < (16 + 2*count)), "CodeHeap: many extra hops due to optimization. blocks: %d, extra hops: %d.", count, extra_hops);

    // Verify that the number of free blocks is not out of hand.
    static int free_block_threshold = 10000;
    if (count > free_block_threshold) {
      warning("CodeHeap: # of free blocks > %d", free_block_threshold);
      // Double the warning limit
      free_block_threshold *= 2;
    }
  }
}

#endif