# HG changeset patch
# User lucy
# Date 1557929126 -7200
# Node ID d8e64b972b77357dd62012e0058ca9db9780e3cb
# Parent  b99e97bc504076c6dedcf9f512d7a74fea22d82d
8223444: Improve CodeHeap Free Space Management
Reviewed-by:

diff --git a/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp b/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp
--- a/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp
@@ -76,7 +76,7 @@
 define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
 define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
 define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
-define_pd_global(uintx, CodeCacheMinBlockLength, 4);
+define_pd_global(uintx, CodeCacheMinBlockLength, 6);
 define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
 
 // Heap related flags
diff --git a/src/hotspot/cpu/arm/c2_globals_arm.hpp b/src/hotspot/cpu/arm/c2_globals_arm.hpp
--- a/src/hotspot/cpu/arm/c2_globals_arm.hpp
+++ b/src/hotspot/cpu/arm/c2_globals_arm.hpp
@@ -97,7 +97,7 @@
 // Ergonomics related flags
 define_pd_global(uint64_t, MaxRAM, 4ULL*G);
 #endif
-define_pd_global(uintx, CodeCacheMinBlockLength, 4);
+define_pd_global(uintx, CodeCacheMinBlockLength, 6);
 define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
 
 define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed
diff --git a/src/hotspot/cpu/ppc/c2_globals_ppc.hpp b/src/hotspot/cpu/ppc/c2_globals_ppc.hpp
--- a/src/hotspot/cpu/ppc/c2_globals_ppc.hpp
+++ b/src/hotspot/cpu/ppc/c2_globals_ppc.hpp
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -90,7 +90,7 @@
 // Ergonomics related flags
 define_pd_global(uint64_t, MaxRAM, 128ULL*G);
 
-define_pd_global(uintx, CodeCacheMinBlockLength, 4);
+define_pd_global(uintx, CodeCacheMinBlockLength, 6);
 define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
 
 define_pd_global(bool, TrapBasedRangeChecks, true);
diff --git a/src/hotspot/cpu/sparc/c2_globals_sparc.hpp b/src/hotspot/cpu/sparc/c2_globals_sparc.hpp
--- a/src/hotspot/cpu/sparc/c2_globals_sparc.hpp
+++ b/src/hotspot/cpu/sparc/c2_globals_sparc.hpp
@@ -80,7 +80,7 @@
 // Ergonomics related flags
 define_pd_global(uint64_t,MaxRAM, 128ULL*G);
 
-define_pd_global(uintx, CodeCacheMinBlockLength, 4);
+define_pd_global(uintx, CodeCacheMinBlockLength, 6);
 define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
 
 define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on sparc.
diff --git a/src/hotspot/cpu/x86/c2_globals_x86.hpp b/src/hotspot/cpu/x86/c2_globals_x86.hpp
--- a/src/hotspot/cpu/x86/c2_globals_x86.hpp
+++ b/src/hotspot/cpu/x86/c2_globals_x86.hpp
@@ -88,7 +88,7 @@
 define_pd_global(uintx, NonProfiledCodeHeapSize, 21*M);
 define_pd_global(uintx, ProfiledCodeHeapSize, 22*M);
 define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
-define_pd_global(uintx, CodeCacheMinBlockLength, 4);
+define_pd_global(uintx, CodeCacheMinBlockLength, 6);
 define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
 
 define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on x86.
diff --git a/src/hotspot/share/memory/heap.cpp b/src/hotspot/share/memory/heap.cpp
--- a/src/hotspot/share/memory/heap.cpp
+++ b/src/hotspot/share/memory/heap.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,6 +55,10 @@
 }
 
+// The segmap is marked free for that part of the heap
+// which has not been allocated yet (beyond _next_segment).
+// "Allocated" space in this context means there exists a
+// HeapBlock or a FreeBlock describing this space.
 void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
   assert( beg < _number_of_committed_segments, "interval begin out of bounds");
   assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
@@ -62,10 +66,14 @@
   address p = (address)_segmap.low() + beg;
   address q = (address)_segmap.low() + end;
   // initialize interval
-  while (p < q) *p++ = free_sentinel;
+  memset(p, free_sentinel, q-p);
 }
 
-
+// Don't get confused here.
+// All existing blocks, no matter if they are used() or free(),
+// have their segmap marked as used. This allows to find the
+// block header (HeapBlock or FreeBlock) for any pointer
+// within the allocated range (upper limit: _next_segment).
 void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
   assert( beg < _number_of_committed_segments, "interval begin out of bounds");
   assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
@@ -189,7 +197,6 @@
   NOT_PRODUCT(verify());
   if (block != NULL) {
-    assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
     assert(!block->free(), "must be marked free");
     guarantee((char*) block >= _memory.low_boundary() && (char*) block < _memory.high(),
               "The newly allocated block " INTPTR_FORMAT " is not within the heap "
@@ -222,19 +229,47 @@
   }
 }
 
+// Split the given block into two at the given segment.
+// This is helpful when a block was allocated too large
+// to trim off the unused space at the end (interpreter).
+// It also helps with splitting a large free block during allocation.
+// Usage state (used or free) must be set by caller since
+// we don't know if the resulting blocks will be used or free.
+// split_at is the segment number (relative to segment_for(b))
+// where the split happens. The segment with relative
+// number split_at is the first segment of the split-off block.
+HeapBlock* CodeHeap::split_block(HeapBlock *b, size_t split_at) {
+  if (b == NULL) return NULL;
+  // After the split, both blocks must have a size of at least CodeCacheMinBlockLength
+  assert((split_at >= CodeCacheMinBlockLength) && (split_at + CodeCacheMinBlockLength <= b->length()),
+         "split position(%d) out of range [0..%d]", (int)split_at, (int)b->length());
+  size_t split_segment = segment_for(b) + split_at;
+  size_t b_size        = b->length();
+  size_t newb_size     = b_size - split_at;
+
+  HeapBlock* newb = block_at(split_segment);
+  newb->set_length(newb_size);
+  mark_segmap_as_used(segment_for(newb), segment_for(newb) + newb_size);
+  b->set_length(split_at);
+  return newb;
+}
+
 void CodeHeap::deallocate_tail(void* p, size_t used_size) {
   assert(p == find_start(p), "illegal deallocation");
   // Find start of HeapBlock
   HeapBlock* b = (((HeapBlock *)p) - 1);
   assert(b->allocated_space() == p, "sanity check");
 
-  size_t used_number_of_segments = size_to_segments(used_size + header_size());
+  size_t actual_number_of_segments = b->length();
+  size_t used_number_of_segments   = size_to_segments(used_size + header_size());
+  size_t unused_number_of_segments = actual_number_of_segments - used_number_of_segments;
   guarantee(used_number_of_segments <= actual_number_of_segments, "Must be!");
-  guarantee(b == block_at(_next_segment - actual_number_of_segments), "Intermediate allocation!");
-  size_t number_of_segments_to_deallocate = actual_number_of_segments - used_number_of_segments;
-  _next_segment -= number_of_segments_to_deallocate;
-  mark_segmap_as_free(_next_segment, _next_segment + number_of_segments_to_deallocate);
-  b->initialize(used_number_of_segments);
+
+  HeapBlock* f = split_block(b, used_number_of_segments);
+  DEBUG_ONLY(memset((void *)f->allocated_space(), badCodeHeapFreeVal,
+             segments_to_size(unused_number_of_segments) - sizeof(HeapBlock)));
+  add_to_freelist(f);
+  NOT_PRODUCT(verify());
 }
 
 void CodeHeap::deallocate(void* p) {
@@ -430,8 +465,8 @@
 
   // First element in list?
   if (_freelist == NULL) {
+    b->set_link(NULL);
     _freelist = b;
-    b->set_link(NULL);
     return;
   }
 
@@ -463,23 +498,31 @@
  * Search freelist for an entry on the list with the best fit.
  * @return NULL, if no one was found
  */
-FreeBlock* CodeHeap::search_freelist(size_t length) {
-  FreeBlock* found_block = NULL;
-  FreeBlock* found_prev = NULL;
-  size_t found_length = 0;
+HeapBlock* CodeHeap::search_freelist(size_t length) {
+  FreeBlock* found_block  = NULL;
+  FreeBlock* found_prev   = NULL;
+  size_t     found_length = _next_segment; // max it out to begin with
+  HeapBlock* res = NULL;
   FreeBlock* prev = NULL;
-  FreeBlock* cur = _freelist;
+  FreeBlock* cur  = _freelist;
 
-  // Search for first block that fits
+  length = length < CodeCacheMinBlockLength ? CodeCacheMinBlockLength : length;
+
+  // Search for best-fitting block
   while(cur != NULL) {
-    if (cur->length() >= length) {
-      // Remember block, its previous element, and its length
-      found_block = cur;
-      found_prev = prev;
-      found_length = found_block->length();
-
+    size_t cur_length = cur->length();
+    if (cur_length == length) {
+      // We have a perfect fit
+      found_block  = cur;
+      found_prev   = prev;
+      found_length = cur_length;
       break;
+    } else if ((cur_length > length) && (cur_length < found_length)) {
+      // This is a new, closer fit. Remember block, its previous element, and its length
+      found_block  = cur;
+      found_prev   = prev;
+      found_length = cur_length;
     }
     // Next element in list
     prev = cur;
@@ -504,20 +547,18 @@
       // Unmap element
       found_prev->set_link(found_block->link());
     }
+    res = found_block;
   } else {
-    // Truncate block and return a pointer to the following block
-    // Set used bit and length on new block
-    found_block->set_length(found_length - length);
-    found_block = following_block(found_block);
-
-    size_t beg = segment_for(found_block);
-    mark_segmap_as_used(beg, beg + length);
-    found_block->set_length(length);
+    // Truncate the free block and return the truncated part
+    // as new HeapBlock. The remaining free block does not
+    // need to be updated, except for its length. Truncating
+    // the segment map does not invalidate the leading part.
+    res = split_block(found_block, found_length - length);
   }
 
-  found_block->set_used();
+  res->set_used();
   _freelist_segments -= length;
-  return found_block;
+  return res;
 }
 
 //----------------------------------------------------------------------------
@@ -549,6 +590,25 @@
     // than free blocks found on the full list.
     assert(count == 0, "missing free blocks");
 
+    // Verify segment map marking.
+    // All allocated segments, no matter if in a free or used block,
+    // must be marked "in use".
+    address seg_map = (address)_segmap.low();
+    size_t  nseg    = 0;
+    for(HeapBlock* b = first_block(); b != NULL; b = next_block(b)) {
+      size_t seg1 = segment_for(b);
+      size_t segn = seg1 + b->length();
+      for (size_t i = seg1; i < segn; i++) {
+        nseg++;
+        if (is_segment_unused(seg_map[i])) {
+          warning("CodeHeap: unused segment. %d [%d..%d], %s block", (int)i, (int)seg1, (int)segn, b->free()? "free":"used");
+        }
+      }
+    }
+    if (nseg != _next_segment) {
+      warning("CodeHeap: segment count mismatch. found %d, expected %d.", (int)nseg, (int)_next_segment);
+    }
+
     // Verify that the number of free blocks is not out of hand.
     static int free_block_threshold = 10000;
     if (count > free_block_threshold) {
diff --git a/src/hotspot/share/memory/heap.hpp b/src/hotspot/share/memory/heap.hpp
--- a/src/hotspot/share/memory/heap.hpp
+++ b/src/hotspot/share/memory/heap.hpp
@@ -51,6 +51,8 @@
  public:
   // Initialization
   void initialize(size_t length) { _header._length = length; set_used(); }
+  // Merging/splitting
+  void set_length(size_t length) { _header._length = length; }
 
   // Accessors
   void* allocated_space() const { return (void*)(this + 1); }
@@ -71,9 +73,6 @@
   // Initialization
   void initialize(size_t length) { HeapBlock::initialize(length); _link= NULL; }
 
-  // Merging
-  void set_length(size_t l) { _header._length = l; }
-
   // Accessors
   FreeBlock* link() const { return _link; }
   void set_link(FreeBlock* link) { _link = link; }
@@ -125,7 +124,7 @@
 
   // Toplevel freelist management
   void add_to_freelist(HeapBlock* b);
-  FreeBlock* search_freelist(size_t length);
+  HeapBlock* search_freelist(size_t length);
 
   // Iteration helpers
   void* next_used(HeapBlock* b) const;
@@ -180,6 +179,7 @@
   size_t segment_size() const { return _segment_size; } // for CodeHeapState
   HeapBlock* first_block() const; // for CodeHeapState
   HeapBlock* next_block(HeapBlock* b) const; // for CodeHeapState
+  HeapBlock* split_block(HeapBlock* b, size_t split_seg); // split one block into two
 
   FreeBlock* freelist() const { return _freelist; } // for CodeHeapState
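
For illustration only (not part of the patch): the standalone C++ sketch below models the best-fit free list search with block splitting that the heap.cpp changes above implement. The names kMinBlockLen, FreeNode and best_fit_allocate are invented for this sketch as stand-ins for CodeCacheMinBlockLength, FreeBlock and search_freelist()/split_block(); it tracks only segment counts, not the segment map or the real contiguous HeapBlock layout.

// Illustrative sketch; simplified stand-in for CodeHeap's free list handling.
#include <cstdio>
#include <cstddef>

static const size_t kMinBlockLen = 6;   // plays the role of CodeCacheMinBlockLength

struct FreeNode {                       // plays the role of FreeBlock (size in segments)
  size_t    length;
  FreeNode* next;
};

// Best-fit allocation: a perfect fit is taken immediately; otherwise the smallest
// block that is still large enough is remembered. If the remainder after carving
// off 'requested' segments would drop below kMinBlockLen, the whole block is
// handed out; otherwise the block is split and only its tail is handed out, so
// the leading part stays linked on the free list. Returns the number of segments
// handed out, or 0 if no block is large enough.
size_t best_fit_allocate(FreeNode** freelist, size_t requested) {
  if (requested < kMinBlockLen) requested = kMinBlockLen;

  FreeNode** best_link = NULL;          // link slot that reaches the best block so far
  size_t     best_len  = (size_t)-1;

  for (FreeNode** link = freelist; *link != NULL; link = &(*link)->next) {
    size_t len = (*link)->length;
    if (len == requested) {             // perfect fit, stop searching
      best_link = link;
      best_len  = len;
      break;
    }
    if (len > requested && len < best_len) {  // closer fit than anything seen so far
      best_link = link;
      best_len  = len;
    }
  }
  if (best_link == NULL) return 0;      // nothing on the list is large enough

  FreeNode* best = *best_link;
  if (best_len - requested < kMinBlockLen) {
    *best_link = best->next;            // exact or near fit: unlink the whole block
    return best_len;
  }
  best->length = best_len - requested;  // split: leading part remains on the free list
  return requested;                     // tail of 'requested' segments is handed out
}

int main() {
  FreeNode c = {16, NULL}, b = {8, &c}, a = {30, &b};
  FreeNode* freelist = &a;
  printf("request 7  -> got %zu segments\n", best_fit_allocate(&freelist, 7));  // takes the 8-segment block
  printf("request 10 -> got %zu segments\n", best_fit_allocate(&freelist, 10)); // splits the 16-segment block
  return 0;
}

With the first-fit search that the patch removes, the request for 7 segments above would have been served from the 30-segment block, eating into the largest free block first; the best-fit search serves it from the 8-segment block instead.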