src/share/vm/memory/heap.cpp
*** old/src/share/vm/memory/heap.cpp Mon Feb 10 08:34:31 2014
--- new/src/share/vm/memory/heap.cpp Mon Feb 10 08:34:31 2014
*** 41,61 ****
--- 41,62 ----
_segment_size = 0;
_log2_segment_size = 0;
_next_segment = 0;
_freelist = NULL;
_freelist_segments = 0;
+ _freelist_length = 0;
}
void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds");
assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
// setup _segmap pointers for faster indexing
address p = (address)_segmap.low() + beg;
address q = (address)_segmap.low() + end;
// initialize interval
! while (p < q) *p++ = 0xFF;
! while (p < q) *p++ = free_sentinel;
}
void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
assert(0 <= beg && beg < _number_of_committed_segments, "interval begin out of bounds");
*** 65,75 ****
--- 66,76 ----
address q = (address)_segmap.low() + end;
// initialize interval
int i = 0;
while (p < q) {
*p++ = i++;
! if (i == 0xFF) i = 1;
! if (i == free_sentinel) i = 1;
}
}
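Taken together, mark_segmap_as_free() fills a range with the free sentinel, while mark_segmap_as_used() writes ascending backward offsets that wrap to 1 before reaching the sentinel value. A minimal stand-alone sketch of that scheme (toy array and names, not the HotSpot types):

#include <cassert>
#include <cstdint>
#include <cstddef>

// Toy segment map; 0xFF marks a free segment, values 0..0xFE are
// backward offsets toward the block header (0 = header itself).
static const uint8_t free_sentinel = 0xFF;

static void mark_free(uint8_t* segmap, size_t beg, size_t end) {
  for (size_t i = beg; i < end; i++) segmap[i] = free_sentinel;
}

static void mark_used(uint8_t* segmap, size_t beg, size_t end) {
  uint8_t i = 0;
  for (size_t s = beg; s < end; s++) {
    segmap[s] = i++;
    if (i == free_sentinel) i = 1;  // wrap so the sentinel is never written
  }
}

int main() {
  uint8_t segmap[300];
  mark_free(segmap, 0, 300);
  mark_used(segmap, 0, 300);       // one block spanning 300 segments
  assert(segmap[0] == 0);          // header segment
  assert(segmap[254] == 254);      // last offset before the wrap
  assert(segmap[255] == 1);        // wrapped back to 1, skipping 0xFF
  return 0;
}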
static size_t align_to_page_size(size_t size) {
*** 137,151 ****
--- 138,147 ----
clear();
return true;
}
void CodeHeap::release() {
Unimplemented();
}
bool CodeHeap::expand_by(size_t size) {
// expand _memory space
size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
if (dm > 0) {
char* base = _memory.low() + _memory.committed_size();
*** 155,207 ****
--- 151,194 ----
_number_of_committed_segments = size_to_segments(_memory.committed_size());
assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
// expand _segmap space
size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
! if (ds > 0) {
! if (!_segmap.expand_by(ds)) return false;
! }
! if ((ds > 0) && !_segmap.expand_by(ds)) {
! return false;
! }
assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
// initialize additional segmap entries
mark_segmap_as_free(i, _number_of_committed_segments);
}
return true;
}
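expand_by() above grows both the backing memory and the segment map, relying on round-up conversions between bytes, pages, and segments. A hedged sketch of that arithmetic, assuming power-of-two page and segment sizes (names are illustrative, not the HotSpot helpers):

#include <cassert>
#include <cstddef>

// Round 'size' up to a multiple of the (power-of-two) page size.
static size_t align_to_page_size(size_t size, size_t page_size) {
  return (size + page_size - 1) & ~(page_size - 1);
}

// Number of segments needed to hold 'size' bytes (round up).
static size_t size_to_segments(size_t size, size_t segment_size) {
  return (size + segment_size - 1) / segment_size;
}

int main() {
  assert(align_to_page_size(1, 4096) == 4096);
  assert(align_to_page_size(4096, 4096) == 4096);
  assert(size_to_segments(1, 64) == 1);   // partial segment still costs one
  assert(size_to_segments(65, 64) == 2);
  return 0;
}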
void CodeHeap::shrink_by(size_t size) {
Unimplemented();
}
void CodeHeap::clear() {
_next_segment = 0;
mark_segmap_as_free(0, _number_of_committed_segments);
}
void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
! size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
! size_t number_of_segments = size_to_segments(instance_size + header_size());
assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
! // First check if we can satify request from freelist
! debug_only(verify());
! // First check if we can satisfy request from freelist
! NOT_PRODUCT(verify());
HeapBlock* block = search_freelist(number_of_segments, is_critical);
! debug_only(if (VerifyCodeCacheOften) verify());
! NOT_PRODUCT(verify());
+
if (block != NULL) {
assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
assert(!block->free(), "must be marked free");
- #ifdef ASSERT
- memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size);
- #endif
+ DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, instance_size));
return block->allocated_space();
}
// Ensure minimum size for allocation to the heap.
! if (number_of_segments < CodeCacheMinBlockLength) {
! number_of_segments = CodeCacheMinBlockLength;
! }
! number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);
if (!is_critical) {
// Make sure the allocation fits in the unallocated heap without using
// the CodeCacheMinimumFreeSpace that is reserved for critical allocations.
if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
*** 213,225 ****
--- 200,210 ----
if (_next_segment + number_of_segments <= _number_of_committed_segments) {
mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
HeapBlock* b = block_at(_next_segment);
b->initialize(number_of_segments);
_next_segment += number_of_segments;
- #ifdef ASSERT
- memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size);
- #endif
+ DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size));
return b->allocated_space();
} else {
return NULL;
}
}
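When the freelist yields nothing, the code above falls back to carving the block out of the unused tail of the heap by bumping _next_segment, provided enough segments are committed. A self-contained toy model of that fallback path (invented ToyHeap type, not the real CodeHeap):

#include <cstddef>
#include <cstdio>

// Toy bump allocator over a fixed number of committed segments.
struct ToyHeap {
  size_t next_segment;
  size_t committed_segments;

  // Returns the first segment of the new block, or (size_t)-1 on failure.
  size_t allocate_segments(size_t n) {
    if (next_segment + n <= committed_segments) {
      size_t block = next_segment;  // block starts at the old watermark
      next_segment += n;            // bump past the new block
      return block;
    }
    return (size_t)-1;              // would overflow the committed area
  }
};

int main() {
  ToyHeap h = { 0, 10 };
  printf("%zu\n", h.allocate_segments(4));             // 0
  printf("%zu\n", h.allocate_segments(6));             // 4
  printf("%zd\n", (ptrdiff_t)h.allocate_segments(1));  // -1: heap is full
  return 0;
}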
*** 228,259 ****
--- 213,272 ----
void CodeHeap::deallocate(void* p) {
assert(p == find_start(p), "illegal deallocation");
// Find start of HeapBlock
HeapBlock* b = (((HeapBlock *)p) - 1);
assert(b->allocated_space() == p, "sanity check");
- #ifdef ASSERT
- memset((void *)b->allocated_space(),
- badCodeHeapFreeVal,
- segments_to_size(b->length()) - sizeof(HeapBlock));
- #endif
+ DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapFreeVal,
+ segments_to_size(b->length()) - sizeof(HeapBlock)));
add_to_freelist(b);
! debug_only(if (VerifyCodeCacheOften) verify());
! NOT_PRODUCT(verify());
}
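In debug builds, deallocate() overwrites the freed payload with badCodeHeapFreeVal so that any stale use of the freed block surfaces as a recognizable bit pattern rather than silently reading old contents. A minimal illustration of the poisoning idiom (the poison value here is arbitrary, not the HotSpot constant):

#include <cstring>
#include <cassert>

static const unsigned char poison = 0xAB;  // stand-in for badCodeHeapFreeVal

static void debug_free(void* payload, size_t payload_size) {
#ifdef ASSERT
  memset(payload, poison, payload_size);   // make stale reads obvious
#endif
  // ... return the block to the freelist ...
}

int main() {
  unsigned char buf[16];
  debug_free(buf, sizeof(buf));
#ifdef ASSERT
  assert(buf[0] == poison && buf[15] == poison);
#endif
  return 0;
}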
+ /**
+ * Uses segment map to find the start (header) of an nmethod. This works as follows:
+ * The memory of the code cache is divided into 'segments'. The size of a segment is
+ * determined by -XX:CodeCacheSegmentSize=XX. Allocation in the code cache can only
+ * happen at segment boundaries. A pointer in the code cache can be mapped to a segment
+ * by calling segment_for(addr). Each time memory is requested from the code cache,
+ * the segmap is updated accordingly. See the following example, which illustrates the
+ * state of the code cache and the segment map: (seg -> segment, nm -> nmethod)
+ *
+ * code cache segmap
+ * ----------- ---------
+ * seg 1 | nm 1 | -> | 0 |
+ * seg 2 | nm 1 | -> | 1 |
+ * ... | nm 1 | -> | .. |
+ * seg m | nm 2 | -> | 0 |
+ * seg m+1 | nm 2 | -> | 1 |
+ * ... | nm 2 | -> | 2 |
+ * ... | nm 2 | -> | .. |
+ * ... | nm 2 | -> | 0xFE |
+ * seg m+n | nm 2 | -> | 1 |
+ * ... | nm 2 | -> | |
+ *
+ * A value of '0' in the segmap indicates that this segment contains the beginning of
+ * an nmethod. Let's walk through a simple example: If we want to find the start of
+ * an nmethod that falls into seg 2, we read the value of segmap[2]. The value
+ * is an offset that points to the segment that contains the start of the nmethod.
+ * Another example: If we want to get the start of nm 2, and we happen to get a pointer
+ * that points to seg m+n, we first read segmap[m+n], which returns '1'. So we have to
+ * do one more read, of segmap[m+n-1], to finally get the segment header.
+ */
void* CodeHeap::find_start(void* p) const {
if (!contains(p)) {
return NULL;
}
! size_t i = segment_for(p);
! address b = (address)_segmap.low();
! if (b[i] == 0xFF) {
! size_t seg_idx = segment_for(p);
! address seg_map = (address)_segmap.low();
! if (is_segment_unused(seg_map[seg_idx])) {
return NULL;
}
! while (b[i] > 0) i -= (int)b[i];
! HeapBlock* h = block_at(i);
! while (seg_map[seg_idx] > 0) {
! seg_idx -= (int)seg_map[seg_idx];
+ }
+
+ HeapBlock* h = block_at(seg_idx);
if (h->free()) {
return NULL;
}
return h->allocated_space();
}
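The comment block above fully determines this lookup: walk backwards through the segment map, subtracting each offset, until a zero entry marks the header segment. A self-contained sketch of the same walk over a toy segment map (plain indices stand in for block_at() and the HeapBlock header):

#include <cassert>
#include <cstdint>
#include <cstddef>

static const uint8_t free_sentinel = 0xFF;

// Walk backwards through the segment map until we reach the entry
// holding 0, i.e. the segment that contains the block header.
// Returns (size_t)-1 if the segment is free.
static size_t find_block_start(const uint8_t* segmap, size_t seg_idx) {
  if (segmap[seg_idx] == free_sentinel) return (size_t)-1;
  while (segmap[seg_idx] > 0) {
    seg_idx -= segmap[seg_idx];  // each entry is an offset toward the header
  }
  return seg_idx;
}

int main() {
  // Layout in the spirit of the comment: nm 1 in segments 0..2, nm 2 in 3..7.
  uint8_t segmap[] = { 0, 1, 2, 0, 1, 2, 3, 4, free_sentinel };
  assert(find_block_start(segmap, 2) == 0);           // inside nm 1
  assert(find_block_start(segmap, 7) == 3);           // inside nm 2
  assert(find_block_start(segmap, 8) == (size_t)-1);  // free segment
  return 0;
}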
*** 270,280 ****
--- 283,293 ----
// equal to alignment_offset (mod alignment_unit).
return sizeof(HeapBlock) & (_segment_size - 1);
}
// Finds the next free heapblock. If the current one is free, it is returned.
! void* CodeHeap::next_free(HeapBlock *b) const {
! void* CodeHeap::next_free(HeapBlock* b) const {
// Since free blocks are merged, there is at most one free block
// between two used ones
if (b != NULL && b->free()) b = next_block(b);
assert(b == NULL || !b->free(), "must be in use or at end of heap");
return (b == NULL) ? NULL : b->allocated_space();
*** 285,295 ****
--- 298,308 ----
if (_next_segment > 0)
return block_at(0);
return NULL;
}
! HeapBlock *CodeHeap::block_start(void *q) const {
! HeapBlock* CodeHeap::block_start(void* q) const {
HeapBlock* b = (HeapBlock*)find_start(q);
if (b == NULL) return NULL;
return b - 1;
}
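Note the division of labor: find_start() returns the usable space behind the header, and block_start() steps one HeapBlock back to return the header itself. A small sketch of that header/payload relationship (invented ToyHeader, not the real HeapBlock):

#include <cassert>
#include <cstddef>

struct ToyHeader { size_t length; };  // stand-in for HeapBlock

// Given a pointer to the payload, recover the header by stepping
// back over one ToyHeader, mirroring 'return b - 1' in block_start().
static ToyHeader* header_of(void* payload) {
  return ((ToyHeader*)payload) - 1;
}

int main() {
  // One block: header immediately followed by its payload.
  alignas(ToyHeader) char storage[sizeof(ToyHeader) + 64];
  ToyHeader* h = (ToyHeader*)storage;
  void* payload = h + 1;             // what find_start() would return
  assert(header_of(payload) == h);   // what block_start() computes
  return 0;
}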
*** 310,319 ****
--- 323,336 ----
size_t CodeHeap::max_capacity() const {
return _memory.reserved_size();
}
+ int CodeHeap::allocated_segments() const {
+ return (int)_next_segment;
+ }
+
size_t CodeHeap::allocated_capacity() const {
// size of used heap - size on freelist
return segments_to_size(_next_segment - _freelist_segments);
}
*** 323,333 ****
--- 340,350 ----
return segments_to_size(_number_of_reserved_segments - _next_segment);
}
// Free list management
! FreeBlock *CodeHeap::following_block(FreeBlock *b) {
! FreeBlock* CodeHeap::following_block(FreeBlock *b) {
return (FreeBlock*)(((address)b) + _segment_size * b->length());
}
// Inserts block b after a
void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
*** 341,367 ****
--- 358,391 ----
merge_right(b); // Try to make b bigger
merge_right(a); // Try to make a include b
}
// Try to merge this block with the following block
! void CodeHeap::merge_right(FreeBlock *a) {
! bool CodeHeap::merge_right(FreeBlock* a) {
assert(a->free(), "must be a free block");
if (following_block(a) == a->link()) {
assert(a->link() != NULL && a->link()->free(), "must be free too");
// Update block a to include the following block
a->set_length(a->length() + a->link()->length());
a->set_link(a->link()->link());
// Update find_start map
size_t beg = segment_for(a);
mark_segmap_as_used(beg, beg + a->length());
+ _freelist_length--;
+ return true;
}
+ return false;
}
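Because the freelist is address-ordered, merge_right() detects physical adjacency simply by comparing following_block(a) with a->link(). A stand-alone sketch of that check (toy types and a fixed segment size, purely illustrative):

#include <cassert>
#include <cstddef>

static const size_t segment_size = 64;

struct ToyFree {
  size_t length;     // block length in segments
  ToyFree* link;     // next (higher-address) free block
};

// Address of the block physically following 'b'.
static ToyFree* following_block(ToyFree* b) {
  return (ToyFree*)((char*)b + segment_size * b->length);
}

// Merge 'a' with its successor if they are physically adjacent.
static bool merge_right(ToyFree* a) {
  if (following_block(a) == a->link) {
    a->length += a->link->length;  // absorb the neighbour
    a->link = a->link->link;       // and unlink it
    return true;
  }
  return false;
}

int main() {
  alignas(ToyFree) char arena[segment_size * 8];
  ToyFree* a = (ToyFree*)arena;                       // segments 0..2
  ToyFree* b = (ToyFree*)(arena + segment_size * 3);  // segments 3..4
  a->length = 3; a->link = b;
  b->length = 2; b->link = NULL;
  assert(merge_right(a));          // adjacent: merged into one block
  assert(a->length == 5 && a->link == NULL);
  return 0;
}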
! void CodeHeap::add_to_freelist(HeapBlock *a) {
! void CodeHeap::add_to_freelist(HeapBlock* a) {
FreeBlock* b = (FreeBlock*)a;
+ _freelist_length++;
+
assert(b != _freelist, "cannot be removed twice");
+
// Mark as free and update free space count
_freelist_segments += b->length();
b->set_free();
// First element in list?
*** 369,467 ****
--- 393,492 ----
_freelist = b;
b->set_link(NULL);
return;
}
+ // Since the freelist is ordered (smaller addresses -> larger addresses) and the
+ // element we want to insert into the freelist has a smaller address than the first
+ // element, we can simply add 'b' as the first element and we are done.
+ if (b < _freelist) {
+ // Insert first in list
+ b->set_link(_freelist);
+ _freelist = b;
+ merge_right(_freelist);
+ return;
+ }
+
// Scan for right place to put into list. List
- // is sorted by increasing addresseses
! FreeBlock* prev = NULL;
! FreeBlock* cur = _freelist;
! FreeBlock* prev = _freelist;
! FreeBlock* cur = _freelist->link();
while(cur != NULL && cur < b) {
! assert(prev == NULL || prev < cur, "must be ordered");
! assert(prev < cur, "Freelist must be ordered");
prev = cur;
cur = cur->link();
}
- assert( (prev == NULL && b < _freelist) ||
- (prev < b && (cur == NULL || b < cur)), "list must be ordered");
- if (prev == NULL) {
- // Insert first in list
- b->set_link(_freelist);
- _freelist = b;
- merge_right(_freelist);
- } else {
+ assert((prev < b) && (cur == NULL || b < cur), "free-list must be ordered");
insert_after(prev, b);
- }
}
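After the new early-out for a block smaller than the head, the scan can start at the second list element with prev already non-NULL. A self-contained sketch of such an address-ordered insert (coalescing omitted for brevity; toy types):

#include <cassert>
#include <cstddef>

struct ToyFree {
  ToyFree* link;
};

// Insert 'b' into the address-ordered singly linked list '*head'.
static void add_sorted(ToyFree** head, ToyFree* b) {
  if (*head == NULL || b < *head) {
    b->link = *head;   // new first element, nothing to scan
    *head = b;
    return;
  }
  ToyFree* prev = *head;          // scan starts at the second element,
  ToyFree* cur = (*head)->link;   // since 'b > *head' is already known
  while (cur != NULL && cur < b) {
    assert(prev < cur && "freelist must stay address-ordered");
    prev = cur;
    cur = cur->link;
  }
  b->link = cur;
  prev->link = b;
}

int main() {
  ToyFree blocks[3];
  ToyFree* head = NULL;
  add_sorted(&head, &blocks[1]);
  add_sorted(&head, &blocks[2]);
  add_sorted(&head, &blocks[0]);  // exercises the insert-at-head case
  assert(head == &blocks[0]);
  assert(head->link == &blocks[1] && blocks[1].link == &blocks[2]);
  return 0;
}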
// Search freelist for an entry on the list with the best fit
// Return NULL if no one was found
+ /**
+ * Search freelist for an entry on the list with the best fit.
+ * @return NULL if no entry was found
+ */
FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
! FreeBlock *best_block = NULL;
! FreeBlock *best_prev = NULL;
! size_t best_length = 0;
! // Search for smallest block which is bigger than length
! FreeBlock *prev = NULL;
! FreeBlock *cur = _freelist;
! while(cur != NULL) {
! size_t l = cur->length();
! if (l >= length && (best_block == NULL || best_length > l)) {
! // Non critical allocations are not allowed to use the last part of the code heap.
! if (!is_critical) {
! // Make sure the end of the allocation doesn't cross into the last part of the code heap
! if (((size_t)cur + length) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) {
! // the freelist is sorted by address - if one fails, all consecutive will also fail.
! break;
! }
! }
! // Remember best block, its previous element, and its length
! best_block = cur;
! best_prev = prev;
! best_length = best_block->length();
! }
! FreeBlock* found_block = NULL;
! FreeBlock* found_prev = NULL;
! size_t found_length = 0;
!
! FreeBlock* prev = NULL;
! FreeBlock* cur = _freelist;
! const size_t critical_boundary = (size_t)high_boundary() - CodeCacheMinimumFreeSpace;
! // Search for first block that fits
! while(cur != NULL) {
! if (cur->length() >= length) {
! // Make sure the end of the allocation doesn't cross into the last part of the code heap.
! if (!is_critical && (((size_t)cur + length) > critical_boundary)) {
! // The freelist is sorted by address - if one fails, all consecutive will also fail.
! break;
! }
! found_block = cur;
! found_prev = prev;
! found_length = found_block->length();
! break;
! }
// Next element in list
prev = cur;
cur = cur->link();
}
! if (best_block == NULL) {
! if (found_block == NULL) {
// None found
return NULL;
}
- assert((best_prev == NULL && _freelist == best_block ) ||
- (best_prev != NULL && best_prev->link() == best_block), "sanity check");
// Exact (or at least good enough) fit. Remove from list.
// Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
! if (best_length < length + CodeCacheMinBlockLength) {
! length = best_length;
! if (best_prev == NULL) {
! assert(_freelist == best_block, "sanity check");
! if (found_length - length < CodeCacheMinBlockLength) {
! _freelist_length--;
! length = found_length;
! if (found_prev == NULL) {
! assert(_freelist == found_block, "sanity check");
_freelist = _freelist->link();
} else {
+ assert((found_prev->link() == found_block), "sanity check");
// Unmap element
! best_prev->set_link(best_block->link());
! found_prev->set_link(found_block->link());
}
} else {
// Truncate block and return a pointer to the following block
! best_block->set_length(best_length - length);
! best_block = following_block(best_block);
! found_block->set_length(found_length - length);
! found_block = following_block(found_block);
// Set used bit and length on new block
! size_t beg = segment_for(best_block);
! size_t beg = segment_for(found_block);
mark_segmap_as_used(beg, beg + length);
! best_block->set_length(length);
! found_block->set_length(length);
}
! best_block->set_used();
! found_block->set_used();
_freelist_segments -= length;
! return best_block;
! return found_block;
}
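The central policy here: take the first fitting block, and if splitting would leave a remainder smaller than CodeCacheMinBlockLength, hand out the whole block so no unusable sliver stays on the freelist. A sketch of that decision on plain segment counts (the minimum is an illustrative stand-in; assumes the block fits the request):

#include <cassert>
#include <cstddef>

static const size_t min_block_length = 4;  // stand-in for CodeCacheMinBlockLength

// Given a free block of 'found_length' segments and a request for
// 'length' segments (found_length >= length), decide how many segments
// to hand out: the whole block if the leftover would be a useless
// sliver, otherwise exactly the requested amount (the rest stays free).
static size_t segments_to_take(size_t found_length, size_t length) {
  if (found_length - length < min_block_length) {
    return found_length;  // take it all; block leaves the freelist
  }
  return length;          // truncate; the tail remains a free block
}

int main() {
  assert(segments_to_take(10, 8) == 10);  // leftover 2 < 4: take all
  assert(segments_to_take(10, 5) == 5);   // leftover 5 >= 4: split
  assert(segments_to_take(8, 8) == 8);    // exact fit
  return 0;
}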
//----------------------------------------------------------------------------
// Non-product code
*** 469,503 ****
--- 494,529 ----
void CodeHeap::print() {
tty->print_cr("The Heap");
}
- #endif
void CodeHeap::verify() {
// Count the number of blocks on the freelist, and the amount of space
// represented.
- int count = 0;
+ if (VerifyCodeCache) {
size_t len = 0;
+ int count = 0;
for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
len += b->length();
count++;
+ // Check if we have merged all free blocks
+ assert(merge_right(b) == false, "Missed merging opportunity");
}
// Verify that freelist contains the right amount of free space
! // guarantee(len == _freelist_segments, "wrong freelist");
! assert(len == _freelist_segments, "wrong freelist");
+
+ for(HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
+ if (h->free()) count--;
+ }
+ // Verify that the freelist contains the same number of blocks
+ // as free blocks found on the full list.
+ assert(count == 0, "missing free blocks");
// Verify that the number of free blocks is not out of hand.
static int free_block_threshold = 10000;
if (count > free_block_threshold) {
warning("CodeHeap: # of free blocks > %d", free_block_threshold);
// Double the warning limit
free_block_threshold *= 2;
}
- // Verify that the freelist contains the same number of free blocks that is
- // found on the full list.
- for(HeapBlock *h = first_block(); h != NULL; h = next_block(h)) {
- if (h->free()) count--;
- }
- // guarantee(count == 0, "missing free blocks");
}
+ }
+ #endif
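The strengthened verify() checks three invariants: the summed freelist lengths match _freelist_segments, no two physically adjacent blocks are both free (every merge opportunity was taken), and the freelist block count matches the free blocks found by a full heap walk. A compact model of those checks (toy heap as an array of blocks, not the real iteration):

#include <cassert>
#include <cstddef>

struct ToyBlock {
  size_t length;
  bool free;
};

// Model of the verify() invariants over a heap laid out as an array
// of blocks, with a separate record of the freelist bookkeeping.
static void verify(const ToyBlock* heap, size_t nblocks,
                   size_t freelist_segments, int freelist_length) {
  size_t len = 0;
  int count = 0;
  for (size_t i = 0; i < nblocks; i++) {
    if (heap[i].free) {
      len += heap[i].length;
      count++;
      // Adjacent free blocks would mean a missed merge opportunity.
      assert(i + 1 == nblocks || !heap[i + 1].free);
    }
  }
  assert(len == freelist_segments && "wrong freelist");
  assert(count == freelist_length && "missing free blocks");
}

int main() {
  ToyBlock heap[] = { {4, false}, {2, true}, {3, false}, {5, true} };
  verify(heap, 4, /*freelist_segments=*/7, /*freelist_length=*/2);
  return 0;
}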