// Decide if large pages should be committed when the memory is reserved.
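// On platforms that cannot commit large pages on demand (that is, when
// os::can_commit_large_page_memory() is false), the pages have to be committed
// up front, when the space is reserved. Committing up front only makes sense
// if MetaspaceGC would allow the metaspace to expand by that amount anyway.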
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// bytes is the size of the associated virtual space.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

#if INCLUDE_CDS
  // This allocates memory with mmap. For DumpSharedSpaces, try to reserve at a
  // configurable address, generally at the top of the Java heap, so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else
#endif
  {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
// ...
}

VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;

ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
ChunkManager* Metaspace::_chunk_manager_class = NULL;

#define VIRTUALSPACEMULTIPLIER 2

#ifdef _LP64
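// The maximum offset (4G == max_juint + 1) that a 32-bit compressed klass
// pointer can cover with a shift of 0.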
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);

void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
  // Figure out the narrow_klass_base and the narrow_klass_shift. The
  // narrow_klass_base is the lower of the metaspace base and the cds base
  // (if cds is enabled). The narrow_klass_shift depends on the distance
  // between the lower base and higher address.
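  //
  // A hypothetical example: with CDS off and the class space reserved at 31G
  // with a size of 1G, higher_address is 32G. That is still within
  // klass_encoding_max (UnscaledClassSpaceMax << LogKlassAlignmentInBytes,
  // i.e. 4G << 3 == 32G), so lower_base becomes 0; and since 32G - 0 exceeds
  // UnscaledClassSpaceMax, the shift becomes LogKlassAlignmentInBytes
  // (zero-based, shifted encoding).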
  address lower_base;
  address higher_address;
#if INCLUDE_CDS
  if (UseSharedSpaces) {
    higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                          (address)(metaspace_base + compressed_class_space_size()));
    lower_base = MIN2(metaspace_base, cds_base);
  } else
#endif
  {
    higher_address = metaspace_base + compressed_class_space_size();
    lower_base = metaspace_base;

    uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
    // If compressed class space fits in lower 32G, we don't need a base.
    if (higher_address <= (address)klass_encoding_max) {
      lower_base = 0; // Effectively lower base is zero.
    }
  }

  Universe::set_narrow_klass_base(lower_base);

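  // An unscaled (shift == 0) encoding is only possible if every offset from
  // lower_base fits in 32 bits.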
  if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
    Universe::set_narrow_klass_shift(0);
  } else {
    assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
    Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
  }
}

#if INCLUDE_CDS
// Return TRUE if the specified metaspace_base and cds_base are close enough
// to work with compressed klass pointers.
bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  address lower_base = MIN2((address)metaspace_base, cds_base);
  address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                                (address)(metaspace_base + compressed_class_space_size()));
  return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
}
#endif

// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
  assert(using_class_space(), "called improperly");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
         "Metaspace size is too big");
  assert_is_ptr_aligned(requested_addr, _reserve_alignment);
  assert_is_ptr_aligned(cds_base, _reserve_alignment);
  assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment);

  // Don't use large pages for the class space.
  bool large_pages = false;

  ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                             _reserve_alignment,
                                             large_pages,
                                             requested_addr, 0);
  if (!metaspace_rs.is_reserved()) {
#if INCLUDE_CDS
    if (UseSharedSpaces) {
      size_t increment = align_size_up(1*G, _reserve_alignment);

      // Keep trying to allocate the metaspace, increasing the requested_addr
      // by 1GB each time, until we reach an address that will no longer allow
      // use of CDS with compressed klass pointers.
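      // The (addr + increment > addr) check below also guards against the
      // candidate address wrapping past the top of the address space.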
      char* addr = requested_addr;
      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
        addr = addr + increment;
        metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                     _reserve_alignment, large_pages, addr, 0);
      }
    }
#endif
    // If no successful allocation then try to allocate the space anywhere. If
    // that fails then OOM doom. At this point we cannot try allocating the
    // metaspace as if UseCompressedClassPointers is off because too much
    // initialization has happened that depends on UseCompressedClassPointers.
    // So, UseCompressedClassPointers cannot be turned off at this point.
    if (!metaspace_rs.is_reserved()) {
      metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                   _reserve_alignment, large_pages);
      if (!metaspace_rs.is_reserved()) {
        vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
                                              compressed_class_space_size()));
      }
    }
  }

  // If we got here then the metaspace got allocated.
  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);

#if INCLUDE_CDS
  // Verify that we can use shared spaces. Otherwise, turn off CDS.
  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
    FileMapInfo::stop_sharing_and_unmap(
        "Could not allocate metaspace at a compatible address");
  }
#endif
  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
                                  UseSharedSpaces ? (address)cds_base : 0);

  initialize_class_space(metaspace_rs);

  if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
    gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
                           Universe::narrow_klass_base(), Universe::narrow_klass_shift());
    gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
                           compressed_class_space_size(), metaspace_rs.base(), requested_addr);
  }
}

// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap. The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, especially with UseLargePages.
  assert(rs.size() >= CompressedClassSpaceSize,
         err_msg(SIZE_FORMAT " < " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
  assert(using_class_space(), "Must be using class space");
// ...
    vm_exit_during_initialization("Too small initial Metaspace size");
  }

  MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
  MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);

  CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment);
  set_compressed_class_space_size(CompressedClassSpaceSize);
}

void Metaspace::global_initialize() {
  MetaspaceGC::initialize();

  // Initialize the alignment for shared spaces.
  int max_alignment = os::vm_allocation_granularity();
  size_t cds_total = 0;

  MetaspaceShared::set_max_alignment(max_alignment);

  if (DumpSharedSpaces) {
#if INCLUDE_CDS
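    // Round each shared region size up to the allocation granularity so that
    // the regions can be laid out and mapped independently.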
    SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
    SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
    SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);

    // Initialize with the sum of the shared space sizes. The read-only
    // and read-write metaspace chunks will be allocated out of this and the
    // remainder is the misc code and data chunks.
    cds_total = FileMapInfo::shared_spaces_size();
    cds_total = align_size_up(cds_total, _reserve_alignment);
    _space_list = new VirtualSpaceList(cds_total/wordSize);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

    if (!_space_list->initialization_succeeded()) {
      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
    }

#ifdef _LP64
    if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) {
      vm_exit_during_initialization("Unable to dump shared archive.",
          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
                  "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(),
                  cds_total + compressed_class_space_size(), UnscaledClassSpaceMax));
    }

    // Set the compressed klass pointer base so that decoding of these pointers works
    // properly when creating the shared archive.
    assert(UseCompressedOops && UseCompressedClassPointers,
           "UseCompressedOops and UseCompressedClassPointers must be set");
    Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
    if (TraceMetavirtualspaceAllocation && Verbose) {
      gclog_or_tty->print_cr("Setting narrow_klass_base to address: " PTR_FORMAT,
                             _space_list->current_virtual_space()->bottom());
    }

    Universe::set_narrow_klass_shift(0);
#endif // _LP64
#endif // INCLUDE_CDS
  } else {
#if INCLUDE_CDS
    // If using shared space, open the file that contains the shared space
    // and map in the memory before initializing the rest of metaspace (so
    // the addresses don't conflict).
    address cds_address = NULL;
    if (UseSharedSpaces) {
      FileMapInfo* mapinfo = new FileMapInfo();

      // Open the shared archive file, read and validate the header. If
      // initialization fails, shared spaces [UseSharedSpaces] are
      // disabled and the file is closed.
      // Also map in the spaces now.
      if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
        cds_total = FileMapInfo::shared_spaces_size();
        cds_address = (address)mapinfo->region_base(0);
      } else {
        assert(!mapinfo->is_open() && !UseSharedSpaces,
               "archive file not closed or shared spaces not disabled.");
      }
    }
#endif // INCLUDE_CDS
#ifdef _LP64
    // If UseCompressedClassPointers is set then allocate the metaspace area
    // above the heap and above the CDS area (if it exists).
    if (using_class_space()) {
      if (UseSharedSpaces) {
#if INCLUDE_CDS
        char* cds_end = (char*)(cds_address + cds_total);
        cds_end = (char*)align_ptr_up(cds_end, _reserve_alignment);
        allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
#endif
      } else {
        char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
        allocate_metaspace_compressed_klass_ptrs(base, 0);
      }
    }
#endif // _LP64

    // Initialize these before initializing the VirtualSpaceList
    _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
    _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
    // Make the first class chunk bigger than a medium chunk so it's not put
    // on the medium chunk list. The next chunk will be small and progress
    // from there. This size was determined by running with -version.
    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                        (CompressedClassSpaceSize/BytesPerWord)*2);
    _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
    // Arbitrarily set the initial virtual space to a multiple
    // of the boot class loader size.
    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());

    // Initialize the list of virtual spaces.
    _space_list = new VirtualSpaceList(word_size);
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);

    if (!_space_list->initialization_succeeded()) {
// ...
}

size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
  return capacity_words_slow(mdtype) * BytesPerWord;
}

size_t Metaspace::allocated_blocks_bytes() const {
  return vsm()->allocated_blocks_bytes() +
      (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
}

size_t Metaspace::allocated_chunks_bytes() const {
  return vsm()->allocated_chunks_bytes() +
      (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
}

void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  assert(!SafepointSynchronize::is_at_safepoint()
         || Thread::current()->is_VM_thread(), "should be the VM thread");

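  // When dumping with -XX:+PrintSharedSpaces, keep the allocation records in
  // sync. No lock is needed for the recording because dumping is single
  // threaded (see the comment in Metaspace::allocate).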
  if (DumpSharedSpaces && PrintSharedSpaces) {
    record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
  }

  MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    // Dark matter. Too small for dictionary.
#ifdef ASSERT
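    // In debug builds, fill the freed words with a recognizable pattern so
    // stale uses of this memory are easier to spot.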
    Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
#endif
    return;
  }
  if (is_class && using_class_space()) {
    class_vsm()->deallocate(ptr, word_size);
  } else {
    vsm()->deallocate(ptr, word_size);
  }
}


MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              bool read_only, MetaspaceObj::Type type, TRAPS) {
  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");

  // Allocate in metaspaces without taking out a lock, because it deadlocks
  // with the SymbolTable_lock. Dumping is single threaded for now. We'll have
  // to revisit this for application class data sharing.
  if (DumpSharedSpaces) {
    assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
    Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
    MetaWord* result = space->allocate(word_size, NonClassType);
    if (result == NULL) {
      report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
    }
    if (PrintSharedSpaces) {
      space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
    }

    // Zero initialize.
    Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);

    return result;
  }

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.

      // Try to clean out some memory and retry.
// ...
  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, err_msg("Got bad mdtype: %d", (int) mdtype));
      return NULL;
  }
}

void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
  assert(DumpSharedSpaces, "sanity");

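  // Records are kept in address order. Three cases below:
  //  (1) the very first record;
  //  (2) the common case: the new allocation directly follows the tail record;
  //  (3) the allocation reuses a previously deallocated block, in which case
  //      the matching record is re-typed and split if the freed block was
  //      larger than the new allocation.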
  int byte_size = (int)word_size * HeapWordSize;
  AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);

  if (_alloc_record_head == NULL) {
    _alloc_record_head = _alloc_record_tail = rec;
  } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
    _alloc_record_tail->_next = rec;
    _alloc_record_tail = rec;
  } else {
    // Slow linear search, but this doesn't happen that often, and only when dumping.
    for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
      if (old->_ptr == ptr) {
        assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
        int remain_bytes = old->_byte_size - byte_size;
        assert(remain_bytes >= 0, "sanity");
        old->_type = type;

        if (remain_bytes == 0) {
          delete(rec);
        } else {
          address remain_ptr = address(ptr) + byte_size;
          rec->_ptr = remain_ptr;
          rec->_byte_size = remain_bytes;
          rec->_type = MetaspaceObj::DeallocatedType;
          rec->_next = old->_next;
          old->_byte_size = byte_size;
          old->_next = rec;
        }
        return;
      }
    }
    assert(0, "reallocating a freed pointer that was not recorded");
  }
}

void Metaspace::record_deallocation(void* ptr, size_t word_size) {
  assert(DumpSharedSpaces, "sanity");

  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
    if (rec->_ptr == ptr) {
      assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity");
      rec->_type = MetaspaceObj::DeallocatedType;
      return;
    }
  }

  assert(0, "deallocating a pointer that was not recorded");
}

void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
  assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");

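  // Walk the records in address order; any range not covered by a record
  // (between records, or between the last record and the current top) is
  // reported to the closure as UnknownType.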
  address last_addr = (address)bottom();

  for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
    address ptr = rec->_ptr;
    if (last_addr < ptr) {
      closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr);
    }
    closure->doit(ptr, rec->_type, rec->_byte_size);
    last_addr = ptr + rec->_byte_size;
  }

  address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType);
  if (last_addr < top) {
    closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr);
  }