diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index a8aaba88abd..a1de6a7fde5 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -43,6 +43,7 @@ #include "memory/universe.hpp" #include "nativeInst_aarch64.hpp" #include "oops/accessDecorators.hpp" +#include "oops/compressedKlass.inline.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/klass.inline.hpp" #include "runtime/icache.hpp" diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp index 98565003691..e4c8356a05c 100644 --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp @@ -32,6 +32,7 @@ #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" #include "nativeInst_ppc.hpp" +#include "oops/compressedKlass.inline.hpp" #include "oops/klass.inline.hpp" #include "oops/methodData.hpp" #include "prims/methodHandles.hpp" diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp index 06567a511ed..cec5b80b428 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp @@ -35,6 +35,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/accessDecorators.hpp" +#include "oops/compressedKlass.inline.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/klass.inline.hpp" #include "prims/methodHandles.hpp" diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp index b573adc3acd..add274f2ad2 100644 --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -37,6 +37,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/accessDecorators.hpp" +#include "oops/compressedKlass.inline.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/klass.inline.hpp" #include "prims/methodHandles.hpp" @@ -4796,27 +4797,46 @@ void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) assert_different_registers(src, dst); // Note: it will change flags assert (UseCompressedClassPointers, "should only be used for compressed headers"); + // CompressedKlassPointers::initialize() must have been run. + assert(CompressedKlassPointers::shift() == 0 || + CompressedKlassPointers::shift() == LogKlassAlignmentInBytes, "decode alg wrong"); // Cannot assert, unverified entry point counts instructions (see .ad file) // vtableStubs also counts instructions in pd_code_size_limit. // Also do not verify_oop as this is called by verify_oop. - if (CompressedKlassPointers::base() == NULL && CompressedKlassPointers::shift() == 0) { // The best case scenario is that there is no base or shift. Then it is already // a pointer that needs nothing but a register rename. 
movl(dst, src); } else { - if (CompressedKlassPointers::base() != NULL) { - mov64(dst, (int64_t)CompressedKlassPointers::base()); - } else { - xorq(dst, dst); - } - if (CompressedKlassPointers::shift() != 0) { - assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); - assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?"); + // Shift=3 -> use leaq + if (CompressedKlassPointers::shift() == Address::times_8) { + // using leaq + if (CompressedKlassPointers::base() != NULL) { + mov64(dst, (int64_t)CompressedKlassPointers::base()); + } else { + xorq(dst, dst); + } leaq(dst, Address(dst, src, Address::times_8, 0)); } else { + assert(CompressedKlassPointers::shift() >= Address::times_8, "Shift values < 3 not implemented"); + // Shift > 3. Cannot use lea. + // We load the right-shifted base as immediate, add the compressed pointer, then left-shift + // the sum. + // This only works if the bits shifted out are zero, so the base must be aligned to + // LogKlassAlignmentInBytes bits, but that is okay. Base alignment is usually at least + // page sized. + assert(is_aligned(CompressedKlassPointers::base(), KlassAlignmentInBytes), "unaligned base"); + const int64_t base_right_shifted = ((int64_t)CompressedKlassPointers::base()) >> LogKlassAlignmentInBytes; + if (CompressedKlassPointers::base() != NULL) { + mov64(dst, base_right_shifted); + } else { + xorq(dst, dst); + } addq(dst, src); + if (CompressedKlassPointers::shift() > 0) { + shlq(dst, LogKlassAlignmentInBytes); + } } } } diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp index 90c3046aec5..57a523f66d9 100644 --- a/src/hotspot/share/cds/archiveBuilder.cpp +++ b/src/hotspot/share/cds/archiveBuilder.cpp @@ -38,6 +38,7 @@ #include "memory/allStatic.hpp" #include "memory/memRegion.hpp" #include "memory/resourceArea.hpp" +#include "oops/compressedKlass.inline.hpp" #include "oops/instanceKlass.hpp" #include "oops/klass.inline.hpp" #include "oops/objArrayKlass.hpp" @@ -623,8 +624,10 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass)); dump_region->allocate(sizeof(address)); } + dest = dump_region->allocate(bytes, KlassAlignmentInBytes); + } else { + dest = dump_region->allocate(bytes); } - dest = dump_region->allocate(bytes); newtop = dump_region->top(); memcpy(dest, src, bytes); @@ -635,7 +638,8 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s ArchivePtrMarker::mark_pointer((address*)dest); } - log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes); + log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d (%s)", p2i(src), p2i(dest), bytes, + MetaspaceObj::type_name(ref->msotype())); src_info->set_dumped_addr((address)dest); _alloc_stats.record(ref->msotype(), int(newtop - oldtop), src_info->read_only()); diff --git a/src/hotspot/share/cds/archiveBuilder.hpp b/src/hotspot/share/cds/archiveBuilder.hpp index 1057d05b083..18b12553f5b 100644 --- a/src/hotspot/share/cds/archiveBuilder.hpp +++ b/src/hotspot/share/cds/archiveBuilder.hpp @@ -28,6 +28,7 @@ #include "cds/archiveUtils.hpp" #include "cds/dumpAllocStats.hpp" #include "memory/metaspaceClosure.hpp" +#include "memory/metaspace/metaspaceAlignment.hpp" #include "oops/array.hpp" #include "oops/klass.hpp" #include "runtime/os.hpp" @@ -43,9 +44,14 @@ class Klass; class MemRegion; class Symbol; -// Metaspace::allocate() 
requires that all blocks must be aligned with KlassAlignmentInBytes. -// We enforce the same alignment rule in blocks allocated from the shared space. -const int SharedSpaceObjectAlignment = KlassAlignmentInBytes; +// Metaspace::allocate() requires a very modest minimum alignment. We enforce the same alignment +// rule in blocks allocated from the shared space. The exceptions to that are Klass structures +// stored in the archive, which need to be aligned to KlassAlignmentInBytes to fit the +// compressed class pointer encoding scheme (see ArchiveBuilder::make_shallow_copy). +const int SharedSpaceObjectAlignment = metaspace::MetaspaceMinAlignmentBytes; + +// Standard alignment should be sufficient for storing pointers. +STATIC_ASSERT(SharedSpaceObjectAlignment >= sizeof(intptr_t)); // Overview of CDS archive creation (for both static and dynamic dump): // diff --git a/src/hotspot/share/cds/archiveUtils.cpp b/src/hotspot/share/cds/archiveUtils.cpp index 8c273c06a8d..7744161f639 100644 --- a/src/hotspot/share/cds/archiveUtils.cpp +++ b/src/hotspot/share/cds/archiveUtils.cpp @@ -202,19 +202,25 @@ void DumpRegion::commit_to(char* newtop) { } -char* DumpRegion::allocate(size_t num_bytes) { - char* p = (char*)align_up(_top, (size_t)SharedSpaceObjectAlignment); - char* newtop = p + align_up(num_bytes, (size_t)SharedSpaceObjectAlignment); +char* DumpRegion::allocate(size_t num_bytes, size_t alignment) { + // We align the starting address of each allocation. + char* p = (char*)align_up(_top, alignment); + char* newtop = p + num_bytes; + // Note that while technically it should not be necessary, at the moment outside code relies on _top always being + // intptr_t aligned (see ReadClosure). This is unfortunate since it prevents denser packing. Also, I am + // not sure if this would break if SharedSpaceObjectAlignment were ever not sizeof(intptr_t). + newtop = align_up(newtop, sizeof(intptr_t)); expand_top_to(newtop); - memset(p, 0, newtop - p); + memset(p, 0, newtop - p); // todo: needed? debug_only?
return p; } +char* DumpRegion::allocate(size_t num_bytes) { + return allocate(num_bytes, SharedSpaceObjectAlignment); +} + void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) { - assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment"); - intptr_t *p = (intptr_t*)_top; - char* newtop = _top + sizeof(intptr_t); - expand_top_to(newtop); + intptr_t* p = (intptr_t*) allocate(sizeof(intptr_t)); *p = n; if (need_to_mark) { ArchivePtrMarker::mark_pointer(p); @@ -301,7 +307,7 @@ void ReadClosure::do_tag(int tag) { int old_tag; old_tag = (int)(intptr_t)nextPtr(); // do_int(&old_tag); - assert(tag == old_tag, "old tag doesn't match"); + assert(tag == old_tag, "tag doesn't match (%d, expected %d)", old_tag, tag); FileMapInfo::assert_mark(tag == old_tag); } diff --git a/src/hotspot/share/cds/archiveUtils.hpp b/src/hotspot/share/cds/archiveUtils.hpp index 588ad1b6da9..35c3e3dd365 100644 --- a/src/hotspot/share/cds/archiveUtils.hpp +++ b/src/hotspot/share/cds/archiveUtils.hpp @@ -142,10 +142,14 @@ private: public: DumpRegion(const char* name, uintx max_delta = 0) : _name(name), _base(NULL), _top(NULL), _end(NULL), - _max_delta(max_delta), _is_packed(false) {} + _max_delta(max_delta), _is_packed(false), + _rs(NULL), _vs(NULL) {} char* expand_top_to(char* newtop); + // Allocate with standard alignment char* allocate(size_t num_bytes); + // Allocate with a specific alignment + char* allocate(size_t num_bytes, size_t alignment); void append_intptr_t(intptr_t n, bool need_to_mark = false); diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp index 9d1e378369d..ec64bd0da58 100644 --- a/src/hotspot/share/classfile/systemDictionaryShared.cpp +++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp @@ -58,6 +58,7 @@ #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" +#include "oops/compressedKlass.hpp" #include "oops/instanceKlass.hpp" #include "oops/klass.inline.hpp" #include "oops/objArrayOop.inline.hpp" @@ -1408,8 +1409,8 @@ SystemDictionaryShared::find_record(RunTimeSharedDictionary* static_dict, RunTim InstanceKlass* SystemDictionaryShared::find_builtin_class(Symbol* name) { const RunTimeClassInfo* record = find_record(&_builtin_dictionary, &_dynamic_builtin_dictionary, name); if (record != NULL) { + DEBUG_ONLY(CompressedKlassPointers::verify_klass_pointer(record->_klass)); assert(!record->_klass->is_hidden(), "hidden class cannot be looked up by name"); - assert(check_alignment(record->_klass), "Address not aligned"); return record->_klass; } else { return NULL; diff --git a/src/hotspot/share/memory/classLoaderMetaspace.cpp b/src/hotspot/share/memory/classLoaderMetaspace.cpp index 8a87adc97e3..08dcdc12a8c 100644 --- a/src/hotspot/share/memory/classLoaderMetaspace.cpp +++ b/src/hotspot/share/memory/classLoaderMetaspace.cpp @@ -28,6 +28,7 @@ #include "memory/classLoaderMetaspace.hpp" #include "memory/metaspace.hpp" #include "memory/metaspaceUtils.hpp" +#include "memory/metaspace/metaspaceAlignment.hpp" #include "memory/metaspace/chunkManager.hpp" #include "memory/metaspace/internalStats.hpp" #include "memory/metaspace/metaspaceArena.hpp" @@ -37,6 +38,7 @@ #include "memory/metaspace/runningCounters.hpp" #include "memory/metaspaceTracer.hpp" #include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" using metaspace::ChunkManager; using metaspace::MetaspaceArena; @@ -60,17 +62,24 @@ ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, 
Metaspace::MetaspaceType _non_class_space_arena = new MetaspaceArena( non_class_cm, ArenaGrowthPolicy::policy_for_space_type(space_type, false), + metaspace::MetaspaceMinAlignmentWords, lock, RunningCounters::used_nonclass_counter(), "non-class sm"); // If needed, initialize class arena if (Metaspace::using_class_space()) { + // Class space alignment must be >= expected Klass alignment. Obviously :) + // Note: to be able to test metaspace alignment handling separately from whatever + // compressed class pointer encoding looks like, decouple class_space_alignment_words + // from KlassAlignmentInBytes. It just has to be a larger alignment. + const int class_space_alignment_words = KlassAlignmentInBytes / BytesPerWord; ChunkManager* const class_cm = ChunkManager::chunkmanager_class(); _class_space_arena = new MetaspaceArena( class_cm, ArenaGrowthPolicy::policy_for_space_type(space_type, true), + class_space_alignment_words, lock, RunningCounters::used_class_counter(), "class sm"); diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp index 2c42c013560..57d2aa0758b 100644 --- a/src/hotspot/share/memory/metaspace.cpp +++ b/src/hotspot/share/memory/metaspace.cpp @@ -35,6 +35,7 @@ #include "memory/metaspace/chunkManager.hpp" #include "memory/metaspace/commitLimiter.hpp" #include "memory/metaspace/internalStats.hpp" +#include "memory/metaspace/metaspaceAlignment.hpp" #include "memory/metaspace/metaspaceCommon.hpp" #include "memory/metaspace/metaspaceContext.hpp" #include "memory/metaspace/metaspaceReporter.hpp" @@ -835,8 +836,8 @@ void Metaspace::global_initialize() { // We must prevent the very first address of the ccs from being used to store // metadata, since that address would translate to a narrow pointer of 0, and the // VM does not distinguish between "narrow 0 as in NULL" and "narrow 0 as in start - // of ccs". - // Before Elastic Metaspace that did not happen due to the fact that every Metachunk + // of ccs". See CompressedKlassPointers::decode(). + // Before JEP 387 that did not happen due to the fact that every Metachunk // had a header and therefore could not allocate anything at offset 0. #ifdef _LP64 if (using_class_space()) { @@ -849,7 +850,7 @@ void Metaspace::global_initialize() { #ifdef _LP64 if (UseCompressedClassPointers) { // Note: "cds" would be a better fit but keep this for backward compatibility. - LogTarget(Info, gc, metaspace) lt; + LogTarget(Info, metaspace) lt; if (lt.is_enabled()) { ResourceMark rm; LogStream ls(lt); @@ -867,8 +868,7 @@ void Metaspace::post_initialize() { } size_t Metaspace::max_allocation_word_size() { - const size_t max_overhead_words = metaspace::get_raw_word_size_for_requested_word_size(1); - return metaspace::chunklevel::MAX_CHUNK_WORD_SIZE - max_overhead_words; + return metaspace::chunklevel::MAX_CHUNK_WORD_SIZE; } // This version of Metaspace::allocate does not throw OOM but simply returns NULL, and diff --git a/src/hotspot/share/memory/metaspace/allocationGuard.hpp b/src/hotspot/share/memory/metaspace/allocationGuard.hpp deleted file mode 100644 index 0125c23ed61..00000000000 --- a/src/hotspot/share/memory/metaspace/allocationGuard.hpp +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2020 SAP SE. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_MEMORY_METASPACE_ALLOCATIONGUARD_HPP -#define SHARE_MEMORY_METASPACE_ALLOCATIONGUARD_HPP - -#include "memory/allocation.hpp" -#include "memory/metaspace/chunklevel.hpp" -#include "utilities/globalDefinitions.hpp" - -// In Debug builds, Metadata in Metaspace can be optionally guarded - enclosed in canaries - -// to detect memory overwriters. -// -// These canaries are periodically checked, e.g. when the Metaspace is purged in a context -// of a GC. - -// The canaries precede any allocated block... -// -// +---------------+ -// | 'METAMETA' | -// +---------------+ -// | block size | -// +---------------+ -// | block... | -// . . -// . . -// . . -// | | -// +---------------+ -// . . -// +---------------+ -// | 'METAMETA' | -// +---------------+ -// | block size | -// +---------------+ -// | block... | - -// ... and since the blocks are allocated via pointer bump and closely follow each other, -// one block's prefix is its predecessor's suffix, so apart from the last block all -// blocks have an overwriter canary on both ends. -// - -// Note: this feature is only available in debug, and is activated using -// -XX:+MetaspaceGuardAllocations. When active, it disables deallocation handling - since -// freeblock handling in the freeblock lists would get too complex - so one may run leaks -// in deallocation-heavy scenarios (e.g. lots of class redefinitions). -// - -namespace metaspace { - -#ifdef ASSERT - -struct Prefix { - static const uintx EyeCatcher = - NOT_LP64(0x77698465) LP64_ONLY(0x7769846577698465ULL); // "META" resp "METAMETA" - - const uintx _mark; - const size_t _word_size; // raw word size including prefix - // MetaWord payload [0]; // varsized (but unfortunately not all our compilers understand that) - - Prefix(size_t word_size) : - _mark(EyeCatcher), - _word_size(word_size) - {} - - MetaWord* payload() const { - return (MetaWord*)(this + 1); - } - - bool is_valid() const { - return _mark == EyeCatcher && _word_size > 0 && _word_size < chunklevel::MAX_CHUNK_WORD_SIZE; - } - -}; - -// The prefix structure must be aligned to MetaWord size. -STATIC_ASSERT((sizeof(Prefix) & WordAlignmentMask) == 0); - -inline size_t prefix_size() { - return sizeof(Prefix); -} - -// Given a pointer to a memory area, establish the prefix at the start of that area and -// return the starting pointer to the payload. 
-inline MetaWord* establish_prefix(MetaWord* p_raw, size_t raw_word_size) { - const Prefix* pp = new(p_raw)Prefix(raw_word_size); - return pp->payload(); -} - -#endif - -} // namespace metaspace - -#endif // SHARE_MEMORY_METASPACE_ALLOCATIONGUARD_HPP diff --git a/src/hotspot/share/memory/metaspace/chunklevel.hpp b/src/hotspot/share/memory/metaspace/chunklevel.hpp index 8dbc2467fd7..0a6815219c0 100644 --- a/src/hotspot/share/memory/metaspace/chunklevel.hpp +++ b/src/hotspot/share/memory/metaspace/chunklevel.hpp @@ -26,6 +26,7 @@ #ifndef SHARE_MEMORY_METASPACE_CHUNKLEVEL_HPP #define SHARE_MEMORY_METASPACE_CHUNKLEVEL_HPP +#include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" // Constants for the chunk levels and some utility functions. diff --git a/src/hotspot/share/memory/metaspace/freeBlocks.cpp b/src/hotspot/share/memory/metaspace/freeBlocks.cpp index d6901cb7608..af113fdfb2c 100644 --- a/src/hotspot/share/memory/metaspace/freeBlocks.cpp +++ b/src/hotspot/share/memory/metaspace/freeBlocks.cpp @@ -52,6 +52,11 @@ MetaWord* FreeBlocks::remove_block(size_t requested_word_size) { if (p != NULL) { // Blocks which are larger than a certain threshold are split and // the remainder is handed back to the manager. + // Attention alignment: the resulting block must have the right alignment + // for the enclosing arena. ATM this works, since the arena aligns allocated block + // size. If we ever switch to a different model (e.g. aligning the start + // address of allocated blocks instead of the request size) this should be + // rewritten). const size_t waste = real_size - requested_word_size; if (waste > MinWordSize) { add_block(p + requested_word_size, waste); diff --git a/src/hotspot/share/memory/metaspace/metaspaceAlignment.hpp b/src/hotspot/share/memory/metaspace/metaspaceAlignment.hpp new file mode 100644 index 00000000000..17623433a44 --- /dev/null +++ b/src/hotspot/share/memory/metaspace/metaspaceAlignment.hpp @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020 SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_MEMORY_METASPACE_ALIGNMENT_HPP +#define SHARE_MEMORY_METASPACE_ALIGNMENT_HPP + +#include "memory/metaspace/chunklevel.hpp" +#include "memory/metaspace/freeBlocks.hpp" +#include "utilities/align.hpp" +#include "utilities/powerOfTwo.hpp" +#include "utilities/globalDefinitions.hpp" + +// -- thoughtful comment here -- + +namespace metaspace { + +// The minimal alignment: good enough to store structures with 64bit wide members (also on 32-bit). +// Should we ever store longer values, revise. +static const int LogMetaspaceMinimalAlignment = LogBytesPerLong; +static const size_t MetaspaceMinAlignmentBytes = 1 << LogMetaspaceMinimalAlignment; +static const size_t MetaspaceMinAlignmentWords = MetaspaceMinAlignmentBytes / BytesPerWord; + +// The maximum possible alignment is the smallest chunk size (note that the buddy allocator places +// chunks at chunk-size-aligned boundaries, therefore the start address is guaranteed to be aligned). +static const int MetaspaceMaxAlignmentWords = chunklevel::MIN_CHUNK_WORD_SIZE; + +// Given a net allocation word size and an alignment value, return the raw word size we actually +// allocate internally. +inline size_t get_raw_word_size_for_requested_word_size(size_t net_word_size, + size_t alignment_words) { + + // The alignment must be at least the minimum alignment and cannot be larger than the smallest chunk size. + assert(is_power_of_2(alignment_words), "invalid alignment"); + assert(alignment_words >= MetaspaceMinAlignmentWords && + alignment_words <= MetaspaceMaxAlignmentWords, + "invalid alignment (" SIZE_FORMAT ")", alignment_words); + + // Deallocated metablocks are kept in a binlist which means blocks need to have + // a minimal size. + size_t raw_word_size = MAX2(net_word_size, FreeBlocks::MinWordSize); + + raw_word_size = align_up(raw_word_size, alignment_words); + + return raw_word_size; +} + +} // namespace metaspace + +#endif // SHARE_MEMORY_METASPACE_ALIGNMENT_HPP diff --git a/src/hotspot/share/memory/metaspace/metaspaceArena.cpp b/src/hotspot/share/memory/metaspace/metaspaceArena.cpp index ae058fe2fdf..99e68ac46ef 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceArena.cpp +++ b/src/hotspot/share/memory/metaspace/metaspaceArena.cpp @@ -26,12 +26,12 @@ #include "precompiled.hpp" #include "logging/log.hpp" #include "logging/logStream.hpp" -#include "memory/metaspace/allocationGuard.hpp" #include "memory/metaspace/chunkManager.hpp" #include "memory/metaspace/counters.hpp" #include "memory/metaspace/freeBlocks.hpp" #include "memory/metaspace/internalStats.hpp" #include "memory/metaspace/metachunk.hpp" +#include "memory/metaspace/metaspaceAlignment.hpp" #include "memory/metaspace/metaspaceArena.hpp" #include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp" #include "memory/metaspace/metaspaceCommon.hpp" @@ -113,16 +113,21 @@ void MetaspaceArena::add_allocation_to_fbl(MetaWord* p, size_t word_size) { _fbl->add_block(p, word_size); } -MetaspaceArena::MetaspaceArena(ChunkManager* chunk_manager, const ArenaGrowthPolicy* growth_policy, +MetaspaceArena::MetaspaceArena(ChunkManager* chunk_manager, const ArenaGrowthPolicy* growth_policy, int alignment_words, Mutex* lock, SizeAtomicCounter* total_used_words_counter, const char* name) : _lock(lock), _chunk_manager(chunk_manager), _growth_policy(growth_policy), _chunks(), + _alignment_words(alignment_words), _fbl(NULL), _total_used_words_counter(total_used_words_counter), _name(name) +#ifdef ASSERT + , _first_fence(NULL) +#endif + { UL(debug, ": born."); @@ -227,30 +232,58 @@
MetaWord* MetaspaceArena::allocate(size_t requested_word_size) { UL2(trace, "requested " SIZE_FORMAT " words.", requested_word_size); MetaWord* p = NULL; - const size_t raw_word_size = get_raw_word_size_for_requested_word_size(requested_word_size); + const size_t raw_word_size = get_raw_word_size_for_requested_word_size(requested_word_size, _alignment_words); - // 1) Attempt to allocate from the free blocks list - // (Note: to reduce complexity, deallocation handling is disabled if allocation guards - // are enabled, see Settings::ergo_initialize()) + // Before bothering the arena proper, attempt to re-use a block from the free blocks list. if (Settings::handle_deallocations() && _fbl != NULL && !_fbl->is_empty()) { p = _fbl->remove_block(raw_word_size); if (p != NULL) { DEBUG_ONLY(InternalStats::inc_num_allocs_from_deallocated_blocks();) UL2(trace, "taken from fbl (now: %d, " SIZE_FORMAT ").", _fbl->count(), _fbl->total_size()); - // Note: Space which is kept in the freeblock dictionary still counts as used as far - // as statistics go; therefore we skip the epilogue in this function to avoid double - // accounting. + // Note: free blocks in freeblock dictionary still count as "used" as far as statistics go; + // therefore we have no need to adjust any usage counters (see epilogue of allocate_inner()) + // and can just return here. return p; } } + // Primary allocation + p = allocate_inner(requested_word_size); + +#ifdef ASSERT + // Fence allocation + if (p != NULL && Settings::use_allocation_guard()) { + STATIC_ASSERT(is_aligned(sizeof(Fence), BytesPerWord)); + MetaWord* guard = allocate_inner(sizeof(Fence) / BytesPerWord); + if (guard != NULL) { + // Ignore allocation errors for the fence to keep coding simple. If this + // happens (e.g. because right at this time we hit the Metaspace GC threshold), + // we miss adding this one fence. Not a big deal. Note that this would + // be pretty rare. Chances are much higher that the primary allocation above + // would have already failed. + Fence* f = new(guard) Fence(_first_fence); + _first_fence = f; + } + } +#endif // ASSERT + + return p; +} + +// Allocate from the arena proper, once dictionary allocations and fencing are sorted out. +MetaWord* MetaspaceArena::allocate_inner(size_t requested_word_size) { + + assert_lock_strong(lock()); + + const size_t raw_word_size = get_raw_word_size_for_requested_word_size(requested_word_size, _alignment_words); + MetaWord* p = NULL; bool current_chunk_too_small = false; bool commit_failure = false; if (current_chunk() != NULL) { - // 2) Attempt to satisfy the allocation from the current chunk. + // Attempt to satisfy the allocation from the current chunk. // If the current chunk is too small to hold the requested size, attempt to enlarge it. // If that fails, retire the chunk. @@ -311,13 +344,6 @@ MetaWord* MetaspaceArena::allocate(size_t requested_word_size) { } } -#ifdef ASSERT - // When using allocation guards, establish a prefix.
- if (p != NULL && Settings::use_allocation_guard()) { - p = establish_prefix(p, raw_word_size); - } -#endif - if (p == NULL) { InternalStats::inc_num_allocs_failed_limit(); } else { @@ -354,7 +380,7 @@ void MetaspaceArena::deallocate_locked(MetaWord* p, size_t word_size) { } UL2(trace, "deallocating " PTR_FORMAT ", word size: " SIZE_FORMAT ".", p2i(p), word_size); - size_t raw_word_size = get_raw_word_size_for_requested_word_size(word_size); + size_t raw_word_size = get_raw_word_size_for_requested_word_size(word_size, _alignment_words); add_allocation_to_fbl(p, raw_word_size); DEBUG_ONLY(verify_locked();) @@ -425,36 +451,15 @@ void MetaspaceArena::verify_locked() const { } } +void MetaspaceArena::Fence::verify() const { + assert(_eye1 == EyeCatcher && _eye2 == EyeCatcher, + "Metaspace corruption: fence block at " PTR_FORMAT " broken.", p2i(this)); +} + void MetaspaceArena::verify_allocation_guards() const { assert(Settings::use_allocation_guard(), "Don't call with guards disabled."); - - // Verify canaries of all allocations. - // (We can walk all allocations since at the start of a chunk an allocation - // must be present, and the allocation header contains its size, so we can - // find the next one). - for (const Metachunk* c = _chunks.first(); c != NULL; c = c->next()) { - const Prefix* first_broken_block = NULL; - int num_broken_blocks = 0; - const MetaWord* p = c->base(); - while (p < c->top()) { - const Prefix* pp = (const Prefix*)p; - if (!pp->is_valid()) { - UL2(error, "Corrupt block at " PTR_FORMAT " (chunk: " METACHUNK_FORMAT ").", - p2i(pp), METACHUNK_FORMAT_ARGS(c)); - if (first_broken_block == NULL) { - first_broken_block = pp; - } - num_broken_blocks ++; - } - p += pp->_word_size; - } - // After examining all blocks in a chunk, assert if any of those blocks - // was found to be corrupted. - if (first_broken_block != NULL) { - assert(false, "Corrupt block: found at least %d corrupt metaspace block(s) - " - "first corrupted block at " PTR_FORMAT ".", - num_broken_blocks, p2i(first_broken_block)); - } + for (const Fence* f = _first_fence; f != NULL; f = f->next()) { + f->verify(); } } @@ -489,8 +494,8 @@ void MetaspaceArena::print_on_locked(outputStream* st) const { _chunks.count(), _chunks.calc_word_size(), _chunks.calc_committed_word_size()); _chunks.print_on(st); st->cr(); - st->print_cr("growth-policy " PTR_FORMAT ", lock " PTR_FORMAT ", cm " PTR_FORMAT ", fbl " PTR_FORMAT, - p2i(_growth_policy), p2i(_lock), p2i(_chunk_manager), p2i(_fbl)); + st->print_cr("growth-policy " PTR_FORMAT ", alignment %d, lock " PTR_FORMAT ", cm " PTR_FORMAT ", fbl " PTR_FORMAT, + p2i(_growth_policy), _alignment_words * BytesPerWord, p2i(_lock), p2i(_chunk_manager), p2i(_fbl)); } } // namespace metaspace diff --git a/src/hotspot/share/memory/metaspace/metaspaceArena.hpp b/src/hotspot/share/memory/metaspace/metaspaceArena.hpp index 1edbc8997b9..ccfadc8de6b 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceArena.hpp +++ b/src/hotspot/share/memory/metaspace/metaspaceArena.hpp @@ -94,6 +94,9 @@ class MetaspaceArena : public CHeapObj { // List of chunks. Head of the list is the current chunk. MetachunkList _chunks; + // Allocation alignment, in words. + const int _alignment_words; + // Structure to take care of leftover/deallocated space in used chunks. // Owned by the Arena. Gets allocated on demand only. FreeBlocks* _fbl; @@ -107,6 +110,27 @@ class MetaspaceArena : public CHeapObj { // A name for purely debugging/logging purposes.
const char* const _name; +#ifdef ASSERT + // Allocation guards: When active, arena allocations are interleaved with + // fence allocations. An overwritten fence indicates a buffer overrun in either + // the preceding or the following user block. All fences are linked together; + // validating the fences just means walking that linked list. + // Note that for the Arena, fence blocks are just another form of user blocks. + class Fence { + static const uintx EyeCatcher = + NOT_LP64(0x77698465) LP64_ONLY(0x7769846577698465ULL); // "META" resp "METAMETA" + // Two eyecatchers to easily spot a corrupted _next pointer + const uintx _eye1; + const Fence* const _next; + const uintx _eye2; + public: + Fence(const Fence* next) : _eye1(EyeCatcher), _next(next), _eye2(EyeCatcher) {} + const Fence* next() const { return _next; } + void verify() const; + }; + const Fence* _first_fence; +#endif // ASSERT + Mutex* lock() const { return _lock; } ChunkManager* chunk_manager() const { return _chunk_manager; } @@ -138,9 +162,12 @@ class MetaspaceArena : public CHeapObj { // from this arena. DEBUG_ONLY(bool is_valid_area(MetaWord* p, size_t word_size) const;) + // Allocate from the arena proper, once dictionary allocations and fencing are sorted out. + MetaWord* allocate_inner(size_t word_size); + public: - MetaspaceArena(ChunkManager* chunk_manager, const ArenaGrowthPolicy* growth_policy, + MetaspaceArena(ChunkManager* chunk_manager, const ArenaGrowthPolicy* growth_policy, int alignment_words, Mutex* lock, SizeAtomicCounter* total_used_words_counter, const char* name); diff --git a/src/hotspot/share/memory/metaspace/metaspaceCommon.cpp b/src/hotspot/share/memory/metaspace/metaspaceCommon.cpp index 61d364b7e33..855c9d433ac 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceCommon.cpp +++ b/src/hotspot/share/memory/metaspace/metaspaceCommon.cpp @@ -24,8 +24,8 @@ */ #include "precompiled.hpp" -#include "memory/metaspace/allocationGuard.hpp" #include "memory/metaspace/freeBlocks.hpp" +#include "memory/metaspace/metaspaceAlignment.hpp" #include "memory/metaspace/metaspaceCommon.hpp" #include "memory/metaspace/metaspaceSettings.hpp" #include "memory/metaspace/virtualSpaceNode.hpp" @@ -169,29 +169,5 @@ void print_number_of_classes(outputStream* out, uintx classes, uintx classes_sha } } -// Given a net allocation word size, return the raw word size we actually allocate. -// Note: externally visible for gtests. -//static -size_t get_raw_word_size_for_requested_word_size(size_t word_size) { - size_t byte_size = word_size * BytesPerWord; - - // Deallocated metablocks are kept in a binlist which limits their minimal - // size to at least the size of a binlist item (2 words). - byte_size = MAX2(byte_size, FreeBlocks::MinWordSize * BytesPerWord); - - // Metaspace allocations are aligned to word size. - byte_size = align_up(byte_size, AllocationAlignmentByteSize); - - // If we guard allocations, we need additional space for a prefix. 
-#ifdef ASSERT - if (Settings::use_allocation_guard()) { - byte_size += align_up(prefix_size(), AllocationAlignmentByteSize); - } -#endif - size_t raw_word_size = byte_size / BytesPerWord; - assert(raw_word_size * BytesPerWord == byte_size, "Sanity"); - return raw_word_size; -} - } // namespace metaspace diff --git a/src/hotspot/share/memory/metaspace/metaspaceCommon.hpp b/src/hotspot/share/memory/metaspace/metaspaceCommon.hpp index f8e453ca82f..b157dc01ab3 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceCommon.hpp +++ b/src/hotspot/share/memory/metaspace/metaspaceCommon.hpp @@ -35,27 +35,6 @@ class outputStream; namespace metaspace { -// Metaspace allocation alignment: - -// 1) Metaspace allocations have to be aligned such that 64bit values are aligned -// correctly. -// -// 2) Klass* structures allocated from Metaspace have to be aligned to KlassAlignmentInBytes. -// -// At the moment LogKlassAlignmentInBytes is 3, so KlassAlignmentInBytes == 8, -// so (1) and (2) can both be fulfilled with an alignment of 8. Should we increase -// KlassAlignmentInBytes at any time this will increase the necessary alignment as well. In -// that case we may think about introducing a separate alignment just for the class space -// since that alignment would only be needed for Klass structures. - -static const size_t AllocationAlignmentByteSize = 8; -STATIC_ASSERT(AllocationAlignmentByteSize == (size_t)KlassAlignmentInBytes); - -static const size_t AllocationAlignmentWordSize = AllocationAlignmentByteSize / BytesPerWord; - -// Returns the raw word size allocated for a given net allocation -size_t get_raw_word_size_for_requested_word_size(size_t word_size); - // Utility functions // Print a size, in words, scaled. diff --git a/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp b/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp index d856883aaec..c51c06fe199 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp +++ b/src/hotspot/share/memory/metaspace/metaspaceReporter.cpp @@ -38,6 +38,7 @@ #include "memory/metaspace/runningCounters.hpp" #include "memory/metaspace/virtualSpaceList.hpp" #include "memory/metaspaceUtils.hpp" +#include "oops/compressedOops.hpp" #include "runtime/os.hpp" namespace metaspace { @@ -105,10 +106,15 @@ static void print_settings(outputStream* out, size_t scale) { if (Metaspace::using_class_space()) { out->print("CompressedClassSpaceSize: "); print_human_readable_size(out, CompressedClassSpaceSize, scale); + out->cr(); + out->print_cr("KlassAlignmentInBytes: %d", KlassAlignmentInBytes); + out->print("KlassEncodingMetaspaceMax: "); + print_human_readable_size(out, KlassEncodingMetaspaceMax, scale); + out->cr(); + CompressedKlassPointers::print_mode(out); } else { - out->print("No class space"); + out->print_cr("No class space"); } - out->cr(); out->print("Initial GC threshold: "); print_human_readable_size(out, MetaspaceSize, scale); out->cr(); diff --git a/src/hotspot/share/memory/metaspace/metaspaceSettings.cpp b/src/hotspot/share/memory/metaspace/metaspaceSettings.cpp index ffc8630e9be..c8b80b78ac0 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceSettings.cpp +++ b/src/hotspot/share/memory/metaspace/metaspaceSettings.cpp @@ -84,11 +84,6 @@ void Settings::ergo_initialize() { // Deallocations can be manually switched off to aid error analysis, since this removes one layer of complexity // from allocation. _handle_deallocations = MetaspaceHandleDeallocations; - - // We also switch it off automatically if we use allocation guards. 
This is to keep prefix handling in MetaspaceArena simple. - if (_use_allocation_guard) { - _handle_deallocations = false; - } #endif LogStream ls(Log(metaspace)::info()); Settings::print_on(&ls); diff --git a/src/hotspot/share/memory/metaspace/testHelpers.cpp b/src/hotspot/share/memory/metaspace/testHelpers.cpp index 73554807b7d..18c3c1cbd8b 100644 --- a/src/hotspot/share/memory/metaspace/testHelpers.cpp +++ b/src/hotspot/share/memory/metaspace/testHelpers.cpp @@ -24,6 +24,7 @@ */ #include "precompiled.hpp" +#include "memory/metaspace/metaspaceAlignment.hpp" #include "memory/metaspace/metaspaceArena.hpp" #include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp" #include "memory/metaspace/metaspaceContext.hpp" @@ -96,7 +97,8 @@ MetaspaceTestArena* MetaspaceTestContext::create_arena(Metaspace::MetaspaceType MetaspaceArena* arena = NULL; { MutexLocker ml(lock, Mutex::_no_safepoint_check_flag); - arena = new MetaspaceArena(_context->cm(), growth_policy, lock, &_used_words_counter, _name); + arena = new MetaspaceArena(_context->cm(), growth_policy, MetaspaceMinAlignmentWords, + lock, &_used_words_counter, _name); } return new MetaspaceTestArena(lock, arena); } diff --git a/src/hotspot/share/oops/compressedKlass.cpp b/src/hotspot/share/oops/compressedKlass.cpp new file mode 100644 index 00000000000..c1fee8f53bb --- /dev/null +++ b/src/hotspot/share/oops/compressedKlass.cpp @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2021 SAP SE. All rights reserved. + * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "oops/compressedKlass.hpp" +#include "utilities/ostream.hpp" +#include "utilities/debug.hpp" +#include "runtime/globals.hpp" + +address CompressedKlassPointers::_base = NULL; +int CompressedKlassPointers::_shift_copy = 0; + +// Given an address range [addr, addr+len) which the encoding is supposed to +// cover, choose base, shift and range. +// The address range is the expected range of uncompressed Klass pointers we +// will encounter (and the implicit promise that there will be no Klass +// structures outside this range). +void CompressedKlassPointers::initialize(address addr, size_t len) { +#ifdef _LP64 + assert(UseCompressedClassPointers, "Sanity"); + + // For now we assume that the starting address of the range is to be used as + // encoding base. Note that this could be different, the base can be whatever + // as long as the range is covered. Need to revisit this for the various platforms + // once lilliput solidifies. 
+ assert(is_valid_base(addr), "Address must be a valid encoding base"); + + assert(len <= (size_t)KlassEncodingMetaspaceMax, "Range too large."); + + if (UseSharedSpaces || DumpSharedSpaces) { + + // Special requirements if CDS is active: + // Encoding base and shift must be the same between dump and run time. + // CDS takes care that the SharedBaseAddress and CompressedClassSpaceSize + // are the same. Archive size will probably be different at runtime, but + // it can only be smaller than at dump time, never larger, since archives get + // shrunk at the end of the dump process. + // From that it follows that the range [addr, addr+len) we are handed in at + // runtime will start at the same address as at dump time, and its len + // may be smaller at runtime than it was at dump time. + // + // To be very careful here, we avoid any optimizations and just keep using + // the same address and shift value. Specifically we avoid using zero-based + // encoding. We also set the expected value range to 4G (encoding range + // cannot be larger than that). + + _base = addr; + + } else { + + // (Note that this case is almost not worth optimizing for. CDS is typically on.) + + if ((addr + len) <= (address)KlassEncodingMetaspaceMax) { + _base = 0; + } else { + _base = addr; + } + } + + // For SA + _shift_copy = LogKlassAlignmentInBytes; + +#else + fatal("64bit only."); +#endif +} + +// Given an address p, return true if p can be used as an encoding base. +// (Some platforms have restrictions on what constitutes a valid base address). +bool CompressedKlassPointers::is_valid_base(address p) { +#ifdef AARCH64 + // Below 32G, base must be aligned to 4G. + // Above that point, base must be aligned to 32G + if (p < (address)(32 * G)) { + return is_aligned(p, 4 * G); + } + return is_aligned(p, (4 << LogKlassAlignmentInBytes) * G); +#else + return true; +#endif +} + +void CompressedKlassPointers::print_mode(outputStream* st) { + st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d, " + "Narrow klass range: " SIZE_FORMAT_HEX, p2i(base()), shift(), + KlassEncodingMetaspaceMax); +} + diff --git a/src/hotspot/share/oops/compressedKlass.hpp b/src/hotspot/share/oops/compressedKlass.hpp new file mode 100644 index 00000000000..12812b7a628 --- /dev/null +++ b/src/hotspot/share/oops/compressedKlass.hpp @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2021 SAP SE. All rights reserved. + * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions.
+ * + */ + +#ifndef SHARE_OOPS_COMPRESSEDKLASS_HPP +#define SHARE_OOPS_COMPRESSEDKLASS_HPP + +#include "memory/allStatic.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/debug.hpp" + +class outputStream; +class Klass; + + +/* + * + * < Thoughtful comment here > + * + */ + +// Narrow Klass pointer constants; +const int LogKlassAlignmentInBytes = 9; // 512 byte alignment +const int KlassAlignmentInBytes = 1 << LogKlassAlignmentInBytes; + +// Max. allowed size of compressed class pointer, in bits +const int MaxNarrowKlassPointerBits = 22; + +const uint64_t NarrowKlassPointerBitMask = ((((uint64_t)1) << MaxNarrowKlassPointerBits) - 1); + +// Maximal size of compressed class pointer encoding range (2G with 22bit class ptr and 9 bit alignment). +const uint64_t KlassEncodingMetaspaceMax = UCONST64(1) << (MaxNarrowKlassPointerBits + LogKlassAlignmentInBytes); + +// If compressed klass pointers then use narrowKlass. +typedef uint32_t narrowKlass; + +class CompressedKlassPointers : public AllStatic { + friend class VMStructs; + friend class ArchiveBuilder; + + static address _base; + + // Shift is actually a constant; we keep this just for the SA (see vmStructs.cpp) + static int _shift_copy; + + // The decode/encode versions taking an explicit base are for the sole use of CDS + // (see ArchiveBuilder). + static inline Klass* decode_raw(narrowKlass v, address base); + static inline Klass* decode_not_null(narrowKlass v, address base); + static inline narrowKlass encode_not_null(Klass* v, address base); + DEBUG_ONLY(static inline void verify_klass_pointer(const Klass* v, address base)); + +public: + + // Given an address p, return true if p can be used as an encoding base. + // (Some platforms have restrictions of what constitutes a valid base + // address). + static bool is_valid_base(address p); + + // Given an address range [addr, addr+len) which the encoding is supposed to + // cover, choose base, shift and range. + // The address range is the expected range of uncompressed Klass pointers we + // will encounter (and the implicit promise that there will be no Klass + // structures outside this range). + static void initialize(address addr, size_t len); + + static void print_mode(outputStream* st); + + // The encoding base. Note: this is not necessarily the base address of the + // class space nor the base address of the CDS archive. + static address base() { return _base; } + + // End of the encoding range. + static address end() { return base() + KlassEncodingMetaspaceMax; } + + // Shift == LogKlassAlignmentInBytes (TODO: unify) + static int shift() { return LogKlassAlignmentInBytes; } + + static bool is_null(Klass* v) { return v == NULL; } + static bool is_null(narrowKlass v) { return v == 0; } + + static inline Klass* decode_raw(narrowKlass v); + static inline Klass* decode_not_null(narrowKlass v); + static inline Klass* decode(narrowKlass v); + static inline narrowKlass encode_not_null(Klass* v); + static inline narrowKlass encode(Klass* v); + + DEBUG_ONLY(static inline void verify_klass_pointer(const Klass* v)); + DEBUG_ONLY(static inline void verify_narrow_klass_pointer(narrowKlass v);) + +}; + +#endif // SHARE_OOPS_COMPRESSEDOOPS_HPP diff --git a/src/hotspot/share/oops/compressedKlass.inline.hpp b/src/hotspot/share/oops/compressedKlass.inline.hpp new file mode 100644 index 00000000000..9e1d1c183c6 --- /dev/null +++ b/src/hotspot/share/oops/compressedKlass.inline.hpp @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2021 SAP SE. All rights reserved. 
+ * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_OOPS_COMPRESSEDKLASS_INLINE_HPP +#define SHARE_OOPS_COMPRESSEDKLASS_INLINE_HPP + +#include "oops/compressedKlass.hpp" +#include "memory/allStatic.hpp" +#include "utilities/align.hpp" +#include "utilities/globalDefinitions.hpp" + + +inline Klass* CompressedKlassPointers::decode_raw(narrowKlass v) { + return decode_raw(v, base()); +} + +inline Klass* CompressedKlassPointers::decode_raw(narrowKlass v, address narrow_base) { + return (Klass*)(void*)((uintptr_t)narrow_base +((uintptr_t)v << shift())); +} + +inline Klass* CompressedKlassPointers::decode_not_null(narrowKlass v) { + return decode_not_null(v, base()); +} + +inline Klass* CompressedKlassPointers::decode_not_null(narrowKlass v, address narrow_base) { + assert(!is_null(v), "narrow klass value can never be zero"); + Klass* result = decode_raw(v, narrow_base); + DEBUG_ONLY(verify_klass_pointer(result, narrow_base)); + return result; +} + +inline Klass* CompressedKlassPointers::decode(narrowKlass v) { + return is_null(v) ? (Klass*)NULL : decode_not_null(v); +} + +inline narrowKlass CompressedKlassPointers::encode_not_null(Klass* v) { + return encode_not_null(v, base()); +} + +inline narrowKlass CompressedKlassPointers::encode_not_null(Klass* v, address narrow_base) { + DEBUG_ONLY(verify_klass_pointer(v, narrow_base)); + uint64_t v2 = (uint64_t)(pointer_delta((void*)v, narrow_base, 1)); + v2 >>= shift(); + assert(v2 <= UINT_MAX, "narrow klass pointer overflow"); + narrowKlass result = (narrowKlass)v2; + DEBUG_ONLY(verify_narrow_klass_pointer(result)); + assert(decode_not_null(result, narrow_base) == v, "reversibility"); + return result; +} + +inline narrowKlass CompressedKlassPointers::encode(Klass* v) { + return is_null(v) ? 
(narrowKlass)0 : encode_not_null(v); +} + +#ifdef ASSERT +inline void CompressedKlassPointers::verify_klass_pointer(const Klass* v, address narrow_base) { + assert(is_aligned(v, KlassAlignmentInBytes), "misaligned Klass* pointer (" PTR_FORMAT ")", p2i(v)); + address end = narrow_base + KlassEncodingMetaspaceMax; + assert((address)v >= narrow_base && (address)v < end, + "Klass (" PTR_FORMAT ") located outside encoding range [" PTR_FORMAT ", " PTR_FORMAT ")", + p2i(v), p2i(narrow_base), p2i(end)); +} + +inline void CompressedKlassPointers::verify_klass_pointer(const Klass* v) { + verify_klass_pointer(v, base()); +} + +inline void CompressedKlassPointers::verify_narrow_klass_pointer(narrowKlass v) { + // Make sure we only use the lower n bits + assert((((uint64_t)v) & ~NarrowKlassPointerBitMask) == 0, "%x: not a valid narrow klass pointer", v); +} +#endif + +#endif // SHARE_OOPS_COMPRESSEDOOPS_HPP diff --git a/src/hotspot/share/oops/compressedOops.cpp b/src/hotspot/share/oops/compressedOops.cpp index bb050673e11..32c4295b306 100644 --- a/src/hotspot/share/oops/compressedOops.cpp +++ b/src/hotspot/share/oops/compressedOops.cpp @@ -177,126 +177,3 @@ void CompressedOops::print_mode(outputStream* st) { } st->cr(); } - -// For UseCompressedClassPointers. -NarrowPtrStruct CompressedKlassPointers::_narrow_klass = { NULL, 0, true }; - -// CompressedClassSpaceSize set to 1GB, but appear 3GB away from _narrow_ptrs_base during CDS dump. -// (Todo: we should #ifdef out CompressedKlassPointers for 32bit completely and fix all call sites which -// are compiled for 32bit to LP64_ONLY). -size_t CompressedKlassPointers::_range = 0; - - -// Given an address range [addr, addr+len) which the encoding is supposed to -// cover, choose base, shift and range. -// The address range is the expected range of uncompressed Klass pointers we -// will encounter (and the implicit promise that there will be no Klass -// structures outside this range). -void CompressedKlassPointers::initialize(address addr, size_t len) { -#ifdef _LP64 - assert(is_valid_base(addr), "Address must be a valid encoding base"); - address const end = addr + len; - - address base; - int shift; - size_t range; - - if (UseSharedSpaces || DumpSharedSpaces) { - - // Special requirements if CDS is active: - // Encoding base and shift must be the same between dump and run time. - // CDS takes care that the SharedBaseAddress and CompressedClassSpaceSize - // are the same. Archive size will be probably different at runtime, but - // it can only be smaller than at, never larger, since archives get - // shrunk at the end of the dump process. - // From that it follows that the range [addr, len) we are handed in at - // runtime will start at the same address then at dumptime, and its len - // may be smaller at runtime then it was at dump time. - // - // To be very careful here, we avoid any optimizations and just keep using - // the same address and shift value. Specifically we avoid using zero-based - // encoding. We also set the expected value range to 4G (encoding range - // cannot be larger than that). - - base = addr; - - // JDK-8265705 - // This is a temporary fix for aarch64: there, if the range-to-be-encoded is located - // below 32g, either encoding base should be zero or base should be aligned to 4G - // and shift should be zero. The simplest way to fix this for now is to force - // shift to zero for both runtime and dumptime. - // Note however that this is not a perfect solution. 
Ideally this whole function - // should be CDS agnostic, that would simplify it - and testing - alot. See JDK-8267141 - // for details. - shift = 0; - - // This must be true since at dumptime cds+ccs is 4G, at runtime it can - // only be smaller, see comment above. - assert(len <= 4 * G, "Encoding range cannot be larger than 4G"); - range = 4 * G; - - } else { - - // Otherwise we attempt to use a zero base if the range fits in lower 32G. - if (end <= (address)KlassEncodingMetaspaceMax) { - base = 0; - } else { - base = addr; - } - - // Highest offset a Klass* can ever have in relation to base. - range = end - base; - - // We may not even need a shift if the range fits into 32bit: - const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); - if (range < UnscaledClassSpaceMax) { - shift = 0; - } else { - shift = LogKlassAlignmentInBytes; - } - - } - - set_base(base); - set_shift(shift); - set_range(range); -#else - fatal("64bit only."); -#endif -} - -// Given an address p, return true if p can be used as an encoding base. -// (Some platforms have restrictions of what constitutes a valid base address). -bool CompressedKlassPointers::is_valid_base(address p) { -#ifdef AARCH64 - // Below 32G, base must be aligned to 4G. - // Above that point, base must be aligned to 32G - if (p < (address)(32 * G)) { - return is_aligned(p, 4 * G); - } - return is_aligned(p, (4 << LogKlassAlignmentInBytes) * G); -#else - return true; -#endif -} - -void CompressedKlassPointers::print_mode(outputStream* st) { - st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d, " - "Narrow klass range: " SIZE_FORMAT_HEX, p2i(base()), shift(), - range()); -} - -void CompressedKlassPointers::set_base(address base) { - assert(UseCompressedClassPointers, "no compressed klass ptrs?"); - _narrow_klass._base = base; -} - -void CompressedKlassPointers::set_shift(int shift) { - assert(shift == 0 || shift == LogKlassAlignmentInBytes, "invalid shift for klass ptrs"); - _narrow_klass._shift = shift; -} - -void CompressedKlassPointers::set_range(size_t range) { - assert(UseCompressedClassPointers, "no compressed klass ptrs?"); - _range = range; -} diff --git a/src/hotspot/share/oops/compressedOops.hpp b/src/hotspot/share/oops/compressedOops.hpp index 83f18138b5a..25d0633bf4e 100644 --- a/src/hotspot/share/oops/compressedOops.hpp +++ b/src/hotspot/share/oops/compressedOops.hpp @@ -139,57 +139,4 @@ public: static inline narrowOop narrow_oop_cast(T i); }; -// For UseCompressedClassPointers. -class CompressedKlassPointers : public AllStatic { - friend class VMStructs; - - static NarrowPtrStruct _narrow_klass; - - // Together with base, this defines the address range within which Klass - // structures will be located: [base, base+range). While the maximal - // possible encoding range is 4|32G for shift 0|3, if we know beforehand - // the expected range of Klass* pointers will be smaller, a platform - // could use this info to optimize encoding. - static size_t _range; - - static void set_base(address base); - static void set_range(size_t range); - -public: - - static void set_shift(int shift); - - - // Given an address p, return true if p can be used as an encoding base. - // (Some platforms have restrictions of what constitutes a valid base - // address). - static bool is_valid_base(address p); - - // Given an address range [addr, addr+len) which the encoding is supposed to - // cover, choose base, shift and range. 
- // The address range is the expected range of uncompressed Klass pointers we - // will encounter (and the implicit promise that there will be no Klass - // structures outside this range). - static void initialize(address addr, size_t len); - - static void print_mode(outputStream* st); - - static address base() { return _narrow_klass._base; } - static size_t range() { return _range; } - static int shift() { return _narrow_klass._shift; } - - static bool is_null(Klass* v) { return v == NULL; } - static bool is_null(narrowKlass v) { return v == 0; } - - static inline Klass* decode_raw(narrowKlass v, address base); - static inline Klass* decode_raw(narrowKlass v); - static inline Klass* decode_not_null(narrowKlass v); - static inline Klass* decode_not_null(narrowKlass v, address base); - static inline Klass* decode(narrowKlass v); - static inline narrowKlass encode_not_null(Klass* v); - static inline narrowKlass encode_not_null(Klass* v, address base); - static inline narrowKlass encode(Klass* v); - -}; - #endif // SHARE_OOPS_COMPRESSEDOOPS_HPP diff --git a/src/hotspot/share/oops/compressedOops.inline.hpp b/src/hotspot/share/oops/compressedOops.inline.hpp index 316523cc98e..fb023e0dedb 100644 --- a/src/hotspot/share/oops/compressedOops.inline.hpp +++ b/src/hotspot/share/oops/compressedOops.inline.hpp @@ -113,50 +113,4 @@ inline narrowOop CompressedOops::narrow_oop_cast(T i) { return static_cast(narrow_value); } -static inline bool check_alignment(Klass* v) { - return (intptr_t)v % KlassAlignmentInBytes == 0; -} - -inline Klass* CompressedKlassPointers::decode_raw(narrowKlass v) { - return decode_raw(v, base()); -} - -inline Klass* CompressedKlassPointers::decode_raw(narrowKlass v, address narrow_base) { - return (Klass*)(void*)((uintptr_t)narrow_base +((uintptr_t)v << shift())); -} - -inline Klass* CompressedKlassPointers::decode_not_null(narrowKlass v) { - return decode_not_null(v, base()); -} - -inline Klass* CompressedKlassPointers::decode_not_null(narrowKlass v, address narrow_base) { - assert(!is_null(v), "narrow klass value can never be zero"); - Klass* result = decode_raw(v, narrow_base); - assert(check_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result)); - return result; -} - -inline Klass* CompressedKlassPointers::decode(narrowKlass v) { - return is_null(v) ? (Klass*)NULL : decode_not_null(v); -} - -inline narrowKlass CompressedKlassPointers::encode_not_null(Klass* v) { - return encode_not_null(v, base()); -} - -inline narrowKlass CompressedKlassPointers::encode_not_null(Klass* v, address narrow_base) { - assert(!is_null(v), "klass value can never be zero"); - assert(check_alignment(v), "Address not aligned"); - uint64_t pd = (uint64_t)(pointer_delta((void*)v, narrow_base, 1)); - assert(KlassEncodingMetaspaceMax > pd, "change encoding max if new encoding"); - uint64_t result = pd >> shift(); - assert((result & CONST64(0xffffffff00000000)) == 0, "narrow klass pointer overflow"); - assert(decode_not_null(result, narrow_base) == v, "reversibility"); - return (narrowKlass)result; -} - -inline narrowKlass CompressedKlassPointers::encode(Klass* v) { - return is_null(v) ? 
(narrowKlass)0 : encode_not_null(v); -} - #endif // SHARE_OOPS_COMPRESSEDOOPS_INLINE_HPP diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp index 59d85bf017f..a46511a9bfa 100644 --- a/src/hotspot/share/oops/klass.cpp +++ b/src/hotspot/share/oops/klass.cpp @@ -48,7 +48,9 @@ #include "prims/jvmtiExport.hpp" #include "runtime/arguments.hpp" #include "runtime/atomic.hpp" +#include "runtime/globals.hpp" #include "runtime/handles.inline.hpp" +#include "utilities/align.hpp" #include "utilities/macros.hpp" #include "utilities/powerOfTwo.hpp" #include "utilities/stack.inline.hpp" @@ -194,7 +196,9 @@ Method* Klass::uncached_lookup_method(const Symbol* name, const Symbol* signatur } void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw() { - return Metaspace::allocate(loader_data, word_size, MetaspaceObj::ClassType, THREAD); + MetaWord* p = Metaspace::allocate(loader_data, word_size, MetaspaceObj::ClassType, THREAD); + assert(is_aligned(p, KlassAlignmentInBytes), "metaspace returned badly aligned memory."); + return p; } // "Normal" instantiation is preceeded by a MetaspaceObj allocation @@ -770,6 +774,10 @@ void Klass::verify_on(outputStream* st) { // in the CLD graph but not in production. assert(Metaspace::contains((address)this), "Should be"); + if (UseCompressedClassPointers) { + assert(is_aligned(this, KlassAlignmentInBytes), "misaligned Klass structure"); + } + guarantee(this->is_klass(),"should be klass"); if (super() != NULL) { diff --git a/src/hotspot/share/oops/markWord.hpp b/src/hotspot/share/oops/markWord.hpp index d79d2b5ab60..4b66387d934 100644 --- a/src/hotspot/share/oops/markWord.hpp +++ b/src/hotspot/share/oops/markWord.hpp @@ -27,7 +27,8 @@ #include "metaprogramming/integralConstant.hpp" #include "metaprogramming/primitiveConversions.hpp" -#include "oops/oopsHierarchy.hpp" +#include "oops/compressedKlass.hpp" +//#include "oops/oopsHierarchy.hpp" #include "runtime/globals.hpp" // The markWord describes the header of an object. 
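For reference, the encode/decode arithmetic that moves from compressedOops.inline.hpp into the new compressedKlass headers boils down to a base-plus-shifted-offset scheme. A minimal standalone sketch, with purely illustrative base and shift values (the real ones are chosen by CompressedKlassPointers::initialize() at VM startup):

    #include <cassert>
    #include <cstdint>

    // Example-only values; not the VM's actual encoding parameters.
    static const uint64_t kBase  = 0x800000000ULL; // 32 GB, purely illustrative
    static const int      kShift = 3;              // LogKlassAlignmentInBytes

    static uint32_t encode(uint64_t klass_addr) {
      uint64_t delta = klass_addr - kBase;          // offset from the encoding base
      assert((delta & ((1u << kShift) - 1)) == 0);  // Klass* must be aligned
      assert((delta >> kShift) <= UINT32_MAX);      // must fit the narrow field
      return (uint32_t)(delta >> kShift);
    }

    static uint64_t decode(uint32_t narrow_klass) {
      return kBase + ((uint64_t)narrow_klass << kShift); // reverse the steps
    }

    int main() {
      uint64_t k = kBase + 0x1000;     // an aligned "Klass*" inside the range
      assert(decode(encode(k)) == k);  // reversibility, as the moved asserts check
      return 0;
    }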
diff --git a/src/hotspot/share/oops/markWord.inline.hpp b/src/hotspot/share/oops/markWord.inline.hpp index af185f6d05a..c45289c0fad 100644 --- a/src/hotspot/share/oops/markWord.inline.hpp +++ b/src/hotspot/share/oops/markWord.inline.hpp @@ -25,6 +25,7 @@ #ifndef SHARE_OOPS_MARKWORD_INLINE_HPP #define SHARE_OOPS_MARKWORD_INLINE_HPP +#include "oops/compressedKlass.inline.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/markWord.hpp" #include "runtime/safepoint.hpp" diff --git a/src/hotspot/share/oops/oop.cpp b/src/hotspot/share/oops/oop.cpp index 88601f1aa2b..852852b5736 100644 --- a/src/hotspot/share/oops/oop.cpp +++ b/src/hotspot/share/oops/oop.cpp @@ -30,6 +30,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/access.inline.hpp" +#include "oops/compressedKlass.inline.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/verifyOopClosure.hpp" diff --git a/src/hotspot/share/oops/oop.hpp b/src/hotspot/share/oops/oop.hpp index 84a66c8b1c9..5f170388db1 100644 --- a/src/hotspot/share/oops/oop.hpp +++ b/src/hotspot/share/oops/oop.hpp @@ -28,6 +28,7 @@ #include "memory/iterator.hpp" #include "memory/memRegion.hpp" #include "oops/accessDecorators.hpp" +#include "oops/compressedKlass.hpp" #include "oops/markWord.hpp" #include "oops/metadata.hpp" #include "runtime/atomic.hpp" diff --git a/src/hotspot/share/oops/oop.inline.hpp b/src/hotspot/share/oops/oop.inline.hpp index 8a1f763c158..f7153855a6c 100644 --- a/src/hotspot/share/oops/oop.inline.hpp +++ b/src/hotspot/share/oops/oop.inline.hpp @@ -31,6 +31,7 @@ #include "oops/access.inline.hpp" #include "oops/arrayKlass.hpp" #include "oops/arrayOop.hpp" +#include "oops/compressedKlass.inline.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/markWord.inline.hpp" #include "oops/oopsHierarchy.hpp" diff --git a/src/hotspot/share/oops/oopsHierarchy.hpp b/src/hotspot/share/oops/oopsHierarchy.hpp index a7f8d5ba653..b16035d226d 100644 --- a/src/hotspot/share/oops/oopsHierarchy.hpp +++ b/src/hotspot/share/oops/oopsHierarchy.hpp @@ -36,9 +36,6 @@ // Global offset instead of address for an oop within a java object. enum class narrowOop : uint32_t { null = 0 }; -// If compressed klass pointers then use narrowKlass. -typedef juint narrowKlass; - typedef void* OopOrNarrowOopStar; #ifndef CHECK_UNHANDLED_OOPS diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp index 66be3d34fbe..2c228bd6db2 100644 --- a/src/hotspot/share/runtime/arguments.cpp +++ b/src/hotspot/share/runtime/arguments.cpp @@ -1584,6 +1584,8 @@ void Arguments::set_use_compressed_klass_ptrs() { } // Check the CompressedClassSpaceSize to make sure we use compressed klass ptrs. if (UseCompressedClassPointers) { + // TODO stuefe: Only correct for CDS=off: with CDS, one needs to take ccs into account. Also, + // does CompressedKlassPointers::initialize not check this? 
if (CompressedClassSpaceSize > KlassEncodingMetaspaceMax) { warning("CompressedClassSpaceSize is too large for UseCompressedClassPointers"); FLAG_SET_DEFAULT(UseCompressedClassPointers, false); diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp index 255f7b4d654..ccb7665d808 100644 --- a/src/hotspot/share/runtime/globals.hpp +++ b/src/hotspot/share/runtime/globals.hpp @@ -1425,7 +1425,7 @@ const intx ObjectAlignmentInBytes = 8; "class pointers are used") \ range(1*M, 3*G) \ \ - develop(size_t, CompressedClassSpaceBaseAddress, 0, \ + product(size_t, CompressedClassSpaceBaseAddress, 0, DIAGNOSTIC, \ "Force the class space to be allocated at this address or " \ "fails VM initialization (requires -Xshare=off.") \ \ diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp index 243814dfbc8..4e33f2a02aa 100644 --- a/src/hotspot/share/runtime/os.cpp +++ b/src/hotspot/share/runtime/os.cpp @@ -41,6 +41,7 @@ #include "memory/guardedMemory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" +#include "oops/compressedKlass.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/oop.inline.hpp" #include "prims/jvm_misc.hpp" diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp index 28d34525f13..28e275d881f 100644 --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -379,8 +379,8 @@ /* CompressedKlassPointers */ \ /***************************/ \ \ - static_field(CompressedKlassPointers, _narrow_klass._base, address) \ - static_field(CompressedKlassPointers, _narrow_klass._shift, int) \ + static_field(CompressedKlassPointers, _base, address) \ + static_field(CompressedKlassPointers, _shift_copy, int) \ \ /******/ \ /* os */ \ diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp index 78a8ffa11bf..152ee1e73f4 100644 --- a/src/hotspot/share/utilities/globalDefinitions.hpp +++ b/src/hotspot/share/utilities/globalDefinitions.hpp @@ -521,11 +521,6 @@ extern int MinObjAlignmentInBytesMask; extern int LogMinObjAlignment; extern int LogMinObjAlignmentInBytes; -const int LogKlassAlignmentInBytes = 3; -const int LogKlassAlignment = LogKlassAlignmentInBytes - LogHeapWordSize; -const int KlassAlignmentInBytes = 1 << LogKlassAlignmentInBytes; -const int KlassAlignment = KlassAlignmentInBytes / HeapWordSize; - // Maximal size of heap where unscaled compression can be used. Also upper bound // for heap placement: 4GB. const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1); @@ -533,11 +528,6 @@ const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1); // placement for zero based compression algorithm: UnscaledOopHeapMax << LogMinObjAlignmentInBytes. extern uint64_t OopEncodingHeapMax; -// Maximal size of compressed class space. Above this limit compression is not possible. -// Also upper bound for placement of zero based class space. (Class space is further limited -// to be < 3G, see arguments.cpp.) -const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes; - // Machine dependent stuff // The maximum size of the code cache. Can be overridden by targets. 
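The constants dropped from globalDefinitions.hpp tie the maximum encodable class-space size to the Klass alignment shift, and the arguments.cpp hunk above relies on that limit. A rough sketch of the relationship, assuming the old shift value of 3 and a 32-bit narrowKlass:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Assumed values mirroring the removed globalDefinitions.hpp constants;
      // after this patch the alignment (and thus the shift) is defined elsewhere.
      const int      log_klass_alignment = 3;   // LogKlassAlignmentInBytes
      const uint64_t narrow_bits         = 32;  // width of narrowKlass

      // Maximum address range a narrow Klass pointer can cover:
      // 2^32 values, each scaled by the alignment -> 4G << 3 = 32G here.
      const uint64_t encoding_max = (uint64_t(1) << narrow_bits) << log_klass_alignment;

      // Spirit of the arguments.cpp check: a class space larger than the
      // encodable range cannot use compressed class pointers.
      const uint64_t compressed_class_space_size = uint64_t(1) << 30; // 1G example
      printf("fits encoding range: %s\n",
             compressed_class_space_size <= encoding_max ? "yes" : "no");
      return 0;
    }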
diff --git a/src/hotspot/share/utilities/vmError.cpp b/src/hotspot/share/utilities/vmError.cpp index ec14e2095ca..c12633c8114 100644 --- a/src/hotspot/share/utilities/vmError.cpp +++ b/src/hotspot/share/utilities/vmError.cpp @@ -36,6 +36,7 @@ #include "memory/metaspaceUtils.hpp" #include "memory/resourceArea.inline.hpp" #include "memory/universe.hpp" +#include "oops/compressedKlass.hpp" #include "oops/compressedOops.hpp" #include "prims/whitebox.hpp" #include "runtime/arguments.hpp" diff --git a/test/hotspot/gtest/metaspace/test_allocationGuard.cpp b/test/hotspot/gtest/metaspace/test_allocationGuard.cpp index 76eca074002..9b0eb3d69a5 100644 --- a/test/hotspot/gtest/metaspace/test_allocationGuard.cpp +++ b/test/hotspot/gtest/metaspace/test_allocationGuard.cpp @@ -44,13 +44,13 @@ using metaspace::Settings; // Note: We use TEST_VM_ASSERT_MSG. However, an assert is only triggered if allocation // guards are enabled; if guards are disabled for the gtests, this test would fail. // So for that case, we trigger a fake assert. -TEST_VM_ASSERT_MSG(metaspace, test_overwriter, ".*failed: Corrupt block") { +TEST_VM_ASSERT_MSG(metaspace, test_overwriter, ".*Metaspace corruption.*") { if (Settings::use_allocation_guard()) { MetaspaceGtestContext context; MetaspaceTestArena* arena = context.create_arena(Metaspace::StandardMetaspaceType); // We allocate two blocks. We then write over the end of the first block, which - // should corrupt the eyecatcher at the start of the second block. + // should corrupt the fence between the two blocks. // Note: there is of course no guarantee that blocks allocated sequentially are neighbors; // but in this case (clean standard-sized test arena and very small allocations) it can // be safely assumed). @@ -59,10 +59,9 @@ TEST_VM_ASSERT_MSG(metaspace, test_overwriter, ".*failed: Corrupt block") { p1[8] = (MetaWord)0x9345; // Overwriter // Now we delete the arena (as happens during class unloading); this will check all // block canaries and should trigger an assert (see MetaspaceArena::verify_allocation_guards()). - tty->print_cr("Death test, please ignore the following \"Corrupt block\" printout."); delete arena; } else { - assert(false, "Corrupt block fake message to satisfy tests"); + assert(false, "Metaspace corruption - please ignore this, fake message to satisfy tests"); } } diff --git a/test/hotspot/gtest/metaspace/test_metaspace_misc.cpp b/test/hotspot/gtest/metaspace/test_metaspace_misc.cpp index e749c84c88e..dba19bd60ab 100644 --- a/test/hotspot/gtest/metaspace/test_metaspace_misc.cpp +++ b/test/hotspot/gtest/metaspace/test_metaspace_misc.cpp @@ -55,11 +55,12 @@ TEST_VM(metaspace, misc_sizes) { TEST_VM(metaspace, misc_max_alloc_size) { - // Make sure we can allocate what we promise to allocate + // Make sure we can allocate what we promise to allocate... const size_t sz = Metaspace::max_allocation_word_size(); ClassLoaderData* cld = ClassLoaderData::the_null_class_loader_data(); MetaWord* p = cld->metaspace_non_null()->allocate(sz, Metaspace::NonClassType); ASSERT_NOT_NULL(p); + // And also, successfully deallocate it. 
cld->metaspace_non_null()->deallocate(p, sz, false); } diff --git a/test/hotspot/gtest/metaspace/test_metaspacearena.cpp b/test/hotspot/gtest/metaspace/test_metaspacearena.cpp index 62d0ddfec53..f1512c94e91 100644 --- a/test/hotspot/gtest/metaspace/test_metaspacearena.cpp +++ b/test/hotspot/gtest/metaspace/test_metaspacearena.cpp @@ -27,6 +27,7 @@ #include "memory/metaspace/commitLimiter.hpp" #include "memory/metaspace/counters.hpp" #include "memory/metaspace/internalStats.hpp" +#include "memory/metaspace/metaspaceAlignment.hpp" #include "memory/metaspace/metaspaceArena.hpp" #include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp" #include "memory/metaspace/metaspaceSettings.hpp" @@ -50,11 +51,6 @@ using metaspace::SizeAtomicCounter; using metaspace::Settings; using metaspace::ArenaStats; -// See metaspaceArena.cpp : needed for predicting commit sizes. -namespace metaspace { - extern size_t get_raw_word_size_for_requested_word_size(size_t net_word_size); -} - class MetaspaceArenaTestHelper { MetaspaceGtestContext& _context; @@ -62,16 +58,19 @@ class MetaspaceArenaTestHelper { Mutex* _lock; const ArenaGrowthPolicy* _growth_policy; SizeAtomicCounter _used_words_counter; + int _alignment_words; MetaspaceArena* _arena; - void initialize(const ArenaGrowthPolicy* growth_policy, const char* name = "gtest-MetaspaceArena") { + void initialize(const ArenaGrowthPolicy* growth_policy, int alignment_words, + const char* name = "gtest-MetaspaceArena") { _growth_policy = growth_policy; _lock = new Mutex(Monitor::leaf, "gtest-MetaspaceArenaTest-lock", Monitor::_safepoint_check_never); + _alignment_words = alignment_words; // Lock during space creation, since this is what happens in the VM too // (see ClassLoaderData::metaspace_non_null(), which we mimick here). { MutexLocker ml(_lock, Mutex::_no_safepoint_check_flag); - _arena = new MetaspaceArena(&_context.cm(), _growth_policy, _lock, &_used_words_counter, name); + _arena = new MetaspaceArena(&_context.cm(), _growth_policy, alignment_words, _lock, &_used_words_counter, name); } DEBUG_ONLY(_arena->verify()); @@ -85,7 +84,7 @@ public: const char* name = "gtest-MetaspaceArena") : _context(helper) { - initialize(ArenaGrowthPolicy::policy_for_space_type(space_type, is_class), name); + initialize(ArenaGrowthPolicy::policy_for_space_type(space_type, is_class), metaspace::MetaspaceMinAlignmentWords, name); } // Create a helper; growth policy is directly specified @@ -93,7 +92,7 @@ public: const char* name = "gtest-MetaspaceArena") : _context(helper) { - initialize(growth_policy, name); + initialize(growth_policy, metaspace::MetaspaceMinAlignmentWords, name); } ~MetaspaceArenaTestHelper() { @@ -281,7 +280,7 @@ static void test_chunk_enlargment_simple(Metaspace::MetaspaceType spacetype, boo metaspace::InternalStats::num_chunks_enlarged() == n1) { size_t s = IntRange(32, 128).random_value(); helper.allocate_from_arena_with_tests_expect_success(s); - allocated += metaspace::get_raw_word_size_for_requested_word_size(s); + allocated += metaspace::get_raw_word_size_for_requested_word_size(s, metaspace::MetaspaceMinAlignmentWords); } EXPECT_GT(metaspace::InternalStats::num_chunks_enlarged(), n1); @@ -338,7 +337,7 @@ TEST_VM(metaspace, MetaspaceArena_test_enlarge_in_place_2) { while (allocated <= MAX_CHUNK_WORD_SIZE) { size_t s = IntRange(32, 128).random_value(); helper.allocate_from_arena_with_tests_expect_success(s); - allocated += metaspace::get_raw_word_size_for_requested_word_size(s); + allocated += metaspace::get_raw_word_size_for_requested_word_size(s, 
metaspace::MetaspaceMinAlignmentWords); if (allocated <= MAX_CHUNK_WORD_SIZE) { // Chunk should have been enlarged in place ASSERT_EQ(1, helper.get_number_of_chunks()); @@ -595,7 +594,7 @@ static void test_controlled_growth(Metaspace::MetaspaceType type, bool is_class, } smhelper.allocate_from_arena_with_tests_expect_success(alloc_words); - words_allocated += metaspace::get_raw_word_size_for_requested_word_size(alloc_words); + words_allocated += metaspace::get_raw_word_size_for_requested_word_size(alloc_words, metaspace::MetaspaceMinAlignmentWords); num_allocated++; size_t used2 = 0, committed2 = 0, capacity2 = 0; diff --git a/test/hotspot/gtest/metaspace/test_metaspacearena_stress.cpp b/test/hotspot/gtest/metaspace/test_metaspacearena_stress.cpp index 17839b44080..386456456dc 100644 --- a/test/hotspot/gtest/metaspace/test_metaspacearena_stress.cpp +++ b/test/hotspot/gtest/metaspace/test_metaspacearena_stress.cpp @@ -26,8 +26,10 @@ #include "precompiled.hpp" #include "memory/metaspace/chunkManager.hpp" #include "memory/metaspace/counters.hpp" +#include "memory/metaspace/metaspaceAlignment.hpp" #include "memory/metaspace/metaspaceArena.hpp" #include "memory/metaspace/metaspaceArenaGrowthPolicy.hpp" +#include "memory/metaspace/metaspaceSettings.hpp" #include "memory/metaspace/metaspaceStatistics.hpp" #include "runtime/mutexLocker.hpp" #include "utilities/debug.hpp" @@ -51,20 +53,15 @@ static bool fifty_fifty() { return IntRange(100).random_value() < 50; } -// See metaspaceArena.cpp : needed for predicting commit sizes. -namespace metaspace { - extern size_t get_raw_word_size_for_requested_word_size(size_t net_word_size); -} - // A MetaspaceArenaTestBed contains a single MetaspaceArena and its lock. // It keeps track of allocations done from this MetaspaceArena. class MetaspaceArenaTestBed : public CHeapObj { - MetaspaceArena* _arena; + const SizeRange _allocation_range; + const size_t _alignment_words; + MetaspaceArena* _arena; Mutex* _lock; - - const SizeRange _allocation_range; size_t _size_of_last_failed_allocation; // We keep track of all allocations done thru the MetaspaceArena to @@ -86,7 +83,8 @@ class MetaspaceArenaTestBed : public CHeapObj { allocation_t* _allocations; // We count how much we did allocate and deallocate - MemRangeCounter _alloc_count; + MemRangeCounter _alloc_count_net; + MemRangeCounter _alloc_count_raw; MemRangeCounter _dealloc_count; // Check statistics returned by MetaspaceArena::add_to_statistics() against what @@ -98,8 +96,8 @@ class MetaspaceArenaTestBed : public CHeapObj { _arena->add_to_statistics(&stats); InUseChunkStats in_use_stats = stats.totals(); - assert(_dealloc_count.total_size() <= _alloc_count.total_size() && - _dealloc_count.count() <= _alloc_count.count(), "Sanity"); + assert(_dealloc_count.total_size() <= _alloc_count_net.total_size() && + _dealloc_count.count() <= _alloc_count_net.count(), "Sanity"); // Check consistency of stats ASSERT_GE(in_use_stats._word_size, in_use_stats._committed_words); @@ -111,39 +109,41 @@ class MetaspaceArenaTestBed : public CHeapObj { // - alignment/padding of allocations // - inside used counter contains blocks in free list // - free block list splinter threshold + // - if +MetaspaceGuardAllocations, guard costs // Since what we deallocated may have been given back to us in a following allocation, // we only know fore sure we allocated what we did not give back. 
- const size_t at_least_allocated = _alloc_count.total_size() - _dealloc_count.total_size(); + const size_t at_least_allocated = _alloc_count_net.total_size() - _dealloc_count.total_size(); // At most we allocated this: - const size_t max_word_overhead_per_alloc = 4; - const size_t at_most_allocated = _alloc_count.total_size() + max_word_overhead_per_alloc * _alloc_count.count(); + const size_t max_word_overhead_per_alloc = + 4 + (metaspace::Settings::use_allocation_guard() ? 4 : 0); + const size_t at_most_allocated = _alloc_count_raw.total_size() + max_word_overhead_per_alloc * _alloc_count_raw.count(); ASSERT_LE(at_least_allocated, in_use_stats._used_words - stats._free_blocks_word_size); ASSERT_GE(at_most_allocated, in_use_stats._used_words - stats._free_blocks_word_size); - } public: MetaspaceArena* arena() { return _arena; } - MetaspaceArenaTestBed(ChunkManager* cm, const ArenaGrowthPolicy* alloc_sequence, + MetaspaceArenaTestBed(ChunkManager* cm, const ArenaGrowthPolicy* alloc_sequence, size_t alignment_words, SizeAtomicCounter* used_words_counter, SizeRange allocation_range) : + _allocation_range(allocation_range), + _alignment_words(alignment_words), _arena(NULL), _lock(NULL), - _allocation_range(allocation_range), _size_of_last_failed_allocation(0), _allocations(NULL), - _alloc_count(), + _alloc_count_net(), _dealloc_count() { _lock = new Mutex(Monitor::leaf, "gtest-MetaspaceArenaTestBed-lock", Monitor::_safepoint_check_never); // Lock during space creation, since this is what happens in the VM too // (see ClassLoaderData::metaspace_non_null(), which we mimick here). MutexLocker ml(_lock, Mutex::_no_safepoint_check_flag); - _arena = new MetaspaceArena(cm, alloc_sequence, _lock, used_words_counter, "gtest-MetaspaceArenaTestBed-sm"); + _arena = new MetaspaceArena(cm, alloc_sequence, alignment_words, _lock, used_words_counter, "gtest-MetaspaceArenaTestBed-sm"); } ~MetaspaceArenaTestBed() { @@ -166,25 +166,30 @@ public: } - size_t words_allocated() const { return _alloc_count.total_size(); } - int num_allocations() const { return _alloc_count.count(); } + size_t words_allocated() const { return _alloc_count_net.total_size(); } + int num_allocations() const { return _alloc_count_net.count(); } size_t size_of_last_failed_allocation() const { return _size_of_last_failed_allocation; } + size_t calc_expected_usage_for_allocated_words(size_t word_size) { + return metaspace::get_raw_word_size_for_requested_word_size(word_size, _alignment_words); + } + // Allocate a random amount. Return false if the allocation failed. 
bool checked_random_allocate() { size_t word_size = 1 + _allocation_range.random_value(); MetaWord* p = _arena->allocate(word_size); if (p != NULL) { - EXPECT_TRUE(is_aligned(p, sizeof(MetaWord))); + EXPECT_TRUE(is_aligned(p, _alignment_words * BytesPerWord)); allocation_t* a = NEW_C_HEAP_OBJ(allocation_t, mtInternal); a->word_size = word_size; a->p = p; a->mark(); a->next = _allocations; _allocations = a; - _alloc_count.add(word_size); - if ((_alloc_count.count() % 20) == 0) { + _alloc_count_net.add(word_size); + _alloc_count_raw.add(calc_expected_usage_for_allocated_words(word_size)); + if ((_alloc_count_net.count() % 20) == 0) { verify_arena_statistics(); DEBUG_ONLY(_arena->verify();) } @@ -226,9 +231,9 @@ class MetaspaceArenaTest { //////// Bed creation, destruction /////// - void create_new_test_bed_at(int slotindex, const ArenaGrowthPolicy* growth_policy, SizeRange allocation_range) { + void create_new_test_bed_at(int slotindex, const ArenaGrowthPolicy* growth_policy, size_t alignment_words, SizeRange allocation_range) { DEBUG_ONLY(_testbeds.check_slot_is_null(slotindex)); - MetaspaceArenaTestBed* bed = new MetaspaceArenaTestBed(&_context.cm(), growth_policy, + MetaspaceArenaTestBed* bed = new MetaspaceArenaTestBed(&_context.cm(), growth_policy, alignment_words, &_used_words_counter, allocation_range); _testbeds.set_at(slotindex, bed); _num_beds.increment(); @@ -239,7 +244,10 @@ class MetaspaceArenaTest { const ArenaGrowthPolicy* growth_policy = ArenaGrowthPolicy::policy_for_space_type( (fifty_fifty() ? Metaspace::StandardMetaspaceType : Metaspace::ReflectionMetaspaceType), fifty_fifty()); - create_new_test_bed_at(slotindex, growth_policy, allocation_range); + const size_t alignment_bytes = + 1 << IntRange(metaspace::LogMetaspaceMinimalAlignment, + metaspace::LogMetaspaceMinimalAlignment + 7).random_value(); // between 8 bytes and 1K + create_new_test_bed_at(slotindex, growth_policy, alignment_bytes / BytesPerWord, allocation_range); } // Randomly create a random test bed at a random slot, and return its slot index @@ -252,15 +260,6 @@ class MetaspaceArenaTest { return slot; } - // Create test beds for all slots - void create_all_test_beds() { - for (int slot = 0; slot < _testbeds.size(); slot++) { - if (_testbeds.slot_is_null(slot)) { - create_random_test_bed_at(slot); - } - } - } - void delete_test_bed_at(int slotindex) { DEBUG_ONLY(_testbeds.check_slot_is_not_null(slotindex)); MetaspaceArenaTestBed* bed = _testbeds.at(slotindex); @@ -296,7 +295,7 @@ class MetaspaceArenaTest { if (success == false) { // We must have hit a limit. EXPECT_LT(_context.commit_limiter().possible_expansion_words(), - metaspace::get_raw_word_size_for_requested_word_size(bed->size_of_last_failed_allocation())); + bed->calc_expected_usage_for_allocated_words(bed->size_of_last_failed_allocation())); } return success; }
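The updated gtests predict committed usage from the requested word size plus the arena alignment. A simplified model of that prediction (the real get_raw_word_size_for_requested_word_size() additionally accounts for minimum block sizes and optional allocation guards; the helper below is a sketch under that assumption):

    #include <cassert>
    #include <cstddef>

    // Pad a requested allocation up to the arena's alignment, in words.
    static size_t align_up_words(size_t word_size, size_t alignment_words) {
      // alignment must be a non-zero power of two
      assert(alignment_words > 0 && (alignment_words & (alignment_words - 1)) == 0);
      return (word_size + alignment_words - 1) & ~(alignment_words - 1);
    }

    int main() {
      // e.g. a 13-word request from an arena aligned to 8 words occupies 16 words
      assert(align_up_words(13, 8) == 16);
      // with the minimal 1-word alignment nothing changes
      assert(align_up_words(13, 1) == 13);
      return 0;
    }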