src/share/vm/code/codeCache.cpp
*** old/src/share/vm/code/codeCache.cpp	Fri Oct 11 15:44:35 2013
--- new/src/share/vm/code/codeCache.cpp	Fri Oct 11 15:44:35 2013

*** 46,57 **** --- 46,57 ----
  #include "runtime/mutexLocker.hpp"
  #include "services/memoryService.hpp"
  #include "trace/tracing.hpp"
  #include "utilities/xmlstream.hpp"

  // Helper class for printing in CodeCache
  class CodeBlob_sizes {
   private:
    int count;
    int total_size;
    int header_size;
*** 113,211 **** --- 113,334 ----
        code_size += cb->code_size();
      }
    }
  };

  // CodeCache implementation

+ // Iterate over all CodeHeaps
+ #define FOR_ALL_HEAPS(it) for (GrowableArrayIterator<CodeHeap*> it = _heaps->begin(); it != _heaps->end(); ++it)
+ // Iterate over all CodeHeaps containing nmethods
+ #define FOR_ALL_METHOD_HEAPS(it) for (GrowableArrayFilterIterator<CodeHeap*, IsMethodPredicate> it(_heaps->begin(), IsMethodPredicate()); it != _heaps->end(); ++it)
+ // Iterate over all CodeBlobs (cb) on the given CodeHeap
+ #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
+ // Iterate over all alive CodeBlobs (cb) on the given CodeHeap
+ #define FOR_ALL_ALIVE_BLOBS(cb, heap) for (CodeBlob* cb = first_alive_blob(heap); cb != NULL; cb = next_alive_blob(heap, cb))
+
- CodeHeap * CodeCache::_heap = new CodeHeap();
+ address CodeCache::_low_bound = 0;
+ address CodeCache::_high_bound = 0;
  int CodeCache::_number_of_blobs = 0;
  int CodeCache::_number_of_adapters = 0;
  int CodeCache::_number_of_nmethods = 0;
  int CodeCache::_number_of_nmethods_with_dependencies = 0;
  bool CodeCache::_needs_cache_clean = false;
  nmethod* CodeCache::_scavenge_root_nmethods = NULL;
  int CodeCache::_codemem_full_count = 0;

+ // Initialize array of CodeHeaps
+ GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (3, true);
+
+ void CodeCache::initialize_heaps() {
+   // Check if custom ReservedCodeCacheSize is set and adapt CodeHeap sizes accordingly
+   if (!FLAG_IS_DEFAULT(ReservedCodeCacheSize) && FLAG_IS_DEFAULT(NonMethodCodeHeapSize)
+       && FLAG_IS_DEFAULT(ProfiledCodeHeapSize) && FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) {
+     if (ReservedCodeCacheSize > NonMethodCodeHeapSize) {
+       // Use the default value for NonMethodCodeHeapSize and use 2/3 of the
+       // remaining size for non-profiled methods and 1/3 for profiled methods
+       size_t remaining_size = ReservedCodeCacheSize - NonMethodCodeHeapSize;
+       FLAG_SET_DEFAULT(ProfiledCodeHeapSize, remaining_size * (double)1/3);
+       FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, remaining_size * (double)2/3);
+     } else {
+       // Use all space for the non-method heap and set other heaps to minimal size
+       FLAG_SET_DEFAULT(NonMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
+       FLAG_SET_DEFAULT(ProfiledCodeHeapSize, os::vm_page_size());
+       FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, os::vm_page_size());
+     }
+   }
+
+   // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
+   if (!heap_available(CodeBlobType::MethodProfiled)) {
+     FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
+     FLAG_SET_DEFAULT(ProfiledCodeHeapSize, 0);
+   }
+
+   // Size check
+   guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
+
+   // Align reserved sizes of CodeHeaps
+   size_t non_method_size   = ReservedCodeSpace::allocation_align_size_up(NonMethodCodeHeapSize);
+   size_t profiled_size     = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
+   size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
+
+   // Compute initial sizes of CodeHeaps
+   size_t init_non_method_size   = MIN2(InitialCodeCacheSize, non_method_size);
+   size_t init_profiled_size     = MIN2(InitialCodeCacheSize, profiled_size);
+   size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);
+
+   // Reserve one continuous chunk of memory for CodeHeaps and split it into
+   // parts for the individual heaps. The memory layout looks like this:
+   // ---------- high -----------
+   //    Non-profiled nmethods
+   //      Profiled nmethods
+   //         Non-methods
+   // ---------- low ------------
+   ReservedCodeSpace rs = reserve_heap_memory(non_profiled_size + profiled_size + non_method_size);
+   ReservedSpace non_method_space   = rs.first_part(non_method_size);
+   ReservedSpace rest               = rs.last_part(non_method_size);
+   ReservedSpace profiled_space     = rest.first_part(profiled_size);
+   ReservedSpace non_profiled_space = rest.last_part(profiled_size);
+
+   // Non-methods (stubs, adapters, ...)
+   add_heap(non_method_space, "Non-methods", init_non_method_size, CodeBlobType::NonMethod);
+   // Tier 2 and tier 3 (profiled) methods
+   add_heap(profiled_space, "Profiled nmethods", init_profiled_size, CodeBlobType::MethodProfiled);
+   // Tier 1 and tier 4 (non-profiled) methods and native methods
+   add_heap(non_profiled_space, "Non-profiled nmethods", init_non_profiled_size, CodeBlobType::MethodNonProfiled);
+ }
+
+ ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
+   // Determine alignment
+   const size_t page_size = os::can_execute_large_page_memory() ?
+       os::page_size_for_region(InitialCodeCacheSize, size, 8) :
+       os::vm_page_size();
+   const size_t granularity = os::vm_allocation_granularity();
+   const size_t r_align = MAX2(page_size, granularity);
+   const size_t r_size = align_size_up(size, r_align);
+   const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
+       MAX2(page_size, granularity);
+
+   ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
+
+   // Initialize bounds
+   _low_bound = (address)rs.base();
+   _high_bound = _low_bound + rs.size();
+   guarantee(low_bound() < high_bound(), "Bound check");
+
+   return rs;
+ }
+
+ bool CodeCache::heap_available(int code_blob_type) {
+   if (TieredCompilation || code_blob_type == CodeBlobType::NonMethod) {
+     // Use all heaps for TieredCompilation
+     return true;
+   } else {
+     // Without TieredCompilation we only need the non-profiled heap
+     return (code_blob_type == CodeBlobType::MethodNonProfiled);
+   }
+ }
+
+ void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
+   // Check if heap is needed
+   if (!heap_available(code_blob_type)) {
+     return;
+   }
+
+   // Create CodeHeap
+   CodeHeap* heap = new CodeHeap(name, code_blob_type);
+   _heaps->append(heap);
+
+   // Reserve Space
+   size_initial = round_to(size_initial, os::vm_page_size());
+   if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
+     vm_exit_during_initialization("Could not reserve enough space for code cache");
+   }
+
+   // Register the CodeHeap
+   MemoryService::add_code_heap_memory_pool(heap, name);
+ }
+
+ CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
+   FOR_ALL_HEAPS(it) {
+     if ((*it)->accepts(code_blob_type)) {
+       return (*it);
+     }
+   }
+   return NULL;
+ }

- CodeBlob* CodeCache::first() {
+ CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
    assert_locked_or_safepoint(CodeCache_lock);
-   return (CodeBlob*)_heap->first();
+   if (heap != NULL) {
+     return (CodeBlob*)heap->first();
+   }
+   return NULL;
  }

- CodeBlob* CodeCache::next(CodeBlob* cb) {
+ CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
    assert_locked_or_safepoint(CodeCache_lock);
-   return (CodeBlob*)_heap->next(cb);
+   if (heap != NULL) {
+     return (CodeBlob*)heap->next(cb);
+   }
+   return NULL;
  }

- CodeBlob* CodeCache::alive(CodeBlob *cb) {
+ CodeBlob* CodeCache::first_alive_blob(CodeHeap* heap) {
    assert_locked_or_safepoint(CodeCache_lock);
-   while (cb != NULL && !cb->is_alive()) cb = next(cb);
+   CodeBlob* cb = first_blob(heap);
+   while (cb != NULL && !cb->is_alive()) {
+     cb = next_blob(heap, cb);
+   }
    return cb;
  }

- nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
+ CodeBlob* CodeCache::next_alive_blob(CodeHeap* heap, CodeBlob* cb) {
    assert_locked_or_safepoint(CodeCache_lock);
-   while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
-   return (nmethod*)cb;
- }
-
- nmethod* CodeCache::first_nmethod() {
-   assert_locked_or_safepoint(CodeCache_lock);
-   CodeBlob* cb = first();
-   while (cb != NULL && !cb->is_nmethod()) {
-     cb = next(cb);
-   }
-   return (nmethod*)cb;
- }
-
- nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
-   assert_locked_or_safepoint(CodeCache_lock);
-   cb = next(cb);
-   while (cb != NULL && !cb->is_nmethod()) {
-     cb = next(cb);
+   cb = next_blob(heap, cb);
+   while (cb != NULL && !cb->is_alive()) {
+     cb = next_blob(heap, cb);
    }
-   return (nmethod*)cb;
+   return cb;
  }

  static size_t maxCodeCacheUsed = 0;

- CodeBlob* CodeCache::allocate(int size, bool is_critical) {
+ CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
    // Do not seize the CodeCache lock here--if the caller has not
    // already done so, we are going to lose bigtime, since the code
    // cache will contain a garbage CodeBlob until the caller can
    // run the constructor for the CodeBlob subclass he is busy
    // instantiating.
    guarantee(size >= 0, "allocation request must be reasonable");
    assert_locked_or_safepoint(CodeCache_lock);
    CodeBlob* cb = NULL;
    _number_of_blobs++;
+
+   // Get CodeHeap for the given CodeBlobType
+   CodeHeap* heap = get_code_heap(code_blob_type);
+   assert (heap != NULL, "Heap exists");
+
    while (true) {
-     cb = (CodeBlob*)_heap->allocate(size, is_critical);
+     cb = (CodeBlob*)heap->allocate(size, is_critical);
      if (cb != NULL) break;
-     if (!_heap->expand_by(CodeCacheExpansionSize)) {
+     if (!heap->expand_by(CodeCacheExpansionSize)) {
        // Expansion failed
        return NULL;
      }
      if (PrintCodeCacheExtension) {
        ResourceMark rm;
-       tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
-                     (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
-                     (address)_heap->high() - (address)_heap->low_boundary());
+       tty->print_cr("CodeHeap '%s' extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
+                     heap->name(), (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
+                     (address)heap->high() - (address)heap->low_boundary());
      }
    }
-   maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
-                           (address)_heap->low_boundary()) - unallocated_capacity());
+
    verify_if_often();
    print_trace("allocation", cb, size);
+
    return cb;
  }

- void CodeCache::free(CodeBlob* cb) {
+ void CodeCache::free(CodeBlob* cb, int code_blob_type) {
    assert_locked_or_safepoint(CodeCache_lock);
    verify_if_often();

    print_trace("free", cb);
    if (cb->is_nmethod()) {
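
The 1/3 : 2/3 split in initialize_heaps() above is plain arithmetic on the flag values. A minimal standalone sketch of that policy (not part of the webrev; the 128 MB cache and 8 MB non-method figures are assumed for illustration, not HotSpot defaults):

    #include <cstddef>
    #include <cstdio>

    // Sketch of the CodeHeap sizing policy, outside the VM.
    int main() {
      const size_t M = 1024 * 1024;
      size_t reserved_code_cache_size = 128 * M;  // assumed -XX:ReservedCodeCacheSize=128m
      size_t non_method_size          = 8 * M;    // assumed NonMethodCodeHeapSize

      // Same arithmetic as initialize_heaps(): the space left after the
      // non-method heap is split 1/3 profiled : 2/3 non-profiled.
      size_t remaining    = reserved_code_cache_size - non_method_size;
      size_t profiled     = (size_t)(remaining * (double)1/3);
      size_t non_profiled = (size_t)(remaining * (double)2/3);

      printf("non-method:   %zu MB\n", non_method_size / M);  // 8 MB
      printf("profiled:     %zu MB\n", profiled / M);         // 40 MB
      printf("non-profiled: %zu MB\n", non_profiled / M);     // 80 MB
      return 0;
    }
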
*** 217,233 **** --- 340,356 ----
    if (cb->is_adapter_blob()) {
      _number_of_adapters--;
    }
    _number_of_blobs--;

-   _heap->deallocate(cb);
+   // Get heap for given CodeBlobType and deallocate
+   get_code_heap(code_blob_type)->deallocate(cb);

    verify_if_often();
    assert(_number_of_blobs >= 0, "sanity check");
  }

  void CodeCache::commit(CodeBlob* cb) {
    // this is called by nmethod::nmethod, which must already own CodeCache_lock
    assert_locked_or_safepoint(CodeCache_lock);
    if (cb->is_nmethod()) {
      _number_of_nmethods++;
*** 241,339 **** --- 364,483 ----
    // flush the hardware I-cache
    ICache::invalidate_range(cb->content_begin(), cb->content_size());
  }

  void CodeCache::flush() {
    assert_locked_or_safepoint(CodeCache_lock);
    Unimplemented();
  }

- // Iteration over CodeBlobs
- #define FOR_ALL_BLOBS(var)          for (CodeBlob *var = first(); var != NULL; var = next(var))
- #define FOR_ALL_ALIVE_BLOBS(var)    for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
- #define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
-
  bool CodeCache::contains(void *p) {
    // It should be ok to call contains without holding a lock
-   return _heap->contains(p);
+   FOR_ALL_HEAPS(it) {
+     if ((*it)->contains(p)) {
+       return true;
+     }
+   }
+   return false;
  }

- // This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
- // looked up (i.e., one that has been marked for deletion). It only dependes on the _segmap to contain
+ // This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
+ // looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
  // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
  CodeBlob* CodeCache::find_blob(void* start) {
    CodeBlob* result = find_blob_unsafe(start);
-   if (result == NULL) return NULL;
-   // We could potientially look up non_entrant methods
-   guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
+   // We could potentially look up non_entrant methods
+   guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
    return result;
  }

+ // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
+ // what you are doing)
+ CodeBlob* CodeCache::find_blob_unsafe(void* start) {
+   // NMT can walk the stack before code cache is created
+   if (_heaps->first() == NULL) return NULL;
+
+   FOR_ALL_HEAPS(it) {
+     CodeBlob* result = (CodeBlob*) (*it)->find_start(start);
+     if (result != NULL && result->blob_contains((address)start)) {
+       return result;
+     }
+   }
+   return NULL;
+ }
+
  nmethod* CodeCache::find_nmethod(void* start) {
-   CodeBlob *cb = find_blob(start);
-   assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
+   CodeBlob* cb = find_blob(start);
+   assert(cb->is_nmethod(), "did not find an nmethod");
    return (nmethod*)cb;
  }

+ bool CodeCache::contains_nmethod(nmethod* nm) {
+   FOR_ALL_METHOD_HEAPS(it) {
+     if ((*it)->contains(nm)) {
+       return true;
+     }
+   }
+   return false;
+ }

  void CodeCache::blobs_do(void f(CodeBlob* nm)) {
    assert_locked_or_safepoint(CodeCache_lock);
-   FOR_ALL_BLOBS(p) {
-     f(p);
+   FOR_ALL_HEAPS(it) {
+     FOR_ALL_BLOBS(cb, *it) {
+       f(cb);
+     }
    }
  }

  void CodeCache::nmethods_do(void f(nmethod* nm)) {
    assert_locked_or_safepoint(CodeCache_lock);
-   FOR_ALL_BLOBS(nm) {
-     if (nm->is_nmethod()) f((nmethod*)nm);
+   FOR_ALL_METHOD_HEAPS(it) {
+     FOR_ALL_BLOBS(cb, *it) {
+       f((nmethod*)cb);
+     }
    }
  }

  void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
    assert_locked_or_safepoint(CodeCache_lock);
-   FOR_ALL_ALIVE_NMETHODS(nm) {
-     f(nm);
+   FOR_ALL_METHOD_HEAPS(it) {
+     FOR_ALL_ALIVE_BLOBS(cb, *it) {
+       f((nmethod*)cb);
+     }
    }
  }

  int CodeCache::alignment_unit() {
-   return (int)_heap->alignment_unit();
+   return (int)_heaps->first()->alignment_unit();
  }

  int CodeCache::alignment_offset() {
-   return (int)_heap->alignment_offset();
+   return (int)_heaps->first()->alignment_offset();
  }

- // Mark nmethods for unloading if they contain otherwise unreachable
- // oops.
+ // Mark nmethods for unloading if they contain otherwise unreachable oops.
  void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
    assert_locked_or_safepoint(CodeCache_lock);
-   FOR_ALL_ALIVE_NMETHODS(nm) {
-     nm->do_unloading(is_alive, unloading_occurred);
+   FOR_ALL_METHOD_HEAPS(it) {
+     FOR_ALL_ALIVE_BLOBS(cb, *it) {
+       nmethod* nm = (nmethod*)cb;
+       nm->do_unloading(is_alive, unloading_occurred);
+     }
    }
  }

  void CodeCache::blobs_do(CodeBlobClosure* f) {
    assert_locked_or_safepoint(CodeCache_lock);
-   FOR_ALL_ALIVE_BLOBS(cb) {
-     f->do_code_blob(cb);
+   FOR_ALL_HEAPS(it) {
+     FOR_ALL_BLOBS(cb, *it) {
+       if (cb->is_alive()) {
+         f->do_code_blob(cb);

  #ifdef ASSERT
-     if (cb->is_nmethod())
-       ((nmethod*)cb)->verify_scavenge_root_oops();
+         if (cb->is_nmethod())
+           ((nmethod*)cb)->verify_scavenge_root_oops();
  #endif //ASSERT
+       }
+     }
    }
  }

  // Walk the list of methods which might contain non-perm oops.
  void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
    assert_locked_or_safepoint(CodeCache_lock);
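
FOR_ALL_METHOD_HEAPS above filters the heap list through a predicate while iterating, so nmethod walks skip the non-method heap entirely. A minimal sketch of that filter-iterator pattern, using std::vector in place of GrowableArray (HeapKind and the predicate are illustrative stand-ins, not HotSpot types):

    #include <cstdio>
    #include <vector>

    enum HeapKind { NonMethod, MethodProfiled, MethodNonProfiled };

    struct IsMethodPredicate {
      bool operator()(HeapKind k) const { return k != NonMethod; }
    };

    int main() {
      std::vector<HeapKind> heaps;
      heaps.push_back(NonMethod);
      heaps.push_back(MethodProfiled);
      heaps.push_back(MethodNonProfiled);

      IsMethodPredicate is_method;
      // Equivalent of FOR_ALL_METHOD_HEAPS: visit only heaps the predicate accepts;
      // GrowableArrayFilterIterator performs this skip inside operator++.
      for (std::vector<HeapKind>::iterator it = heaps.begin(); it != heaps.end(); ++it) {
        if (!is_method(*it)) continue;
        printf("method heap of kind %d\n", (int)*it);
      }
      return 0;
    }
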
*** 432,480 **** --- 576,621 ----
    verify_perm_nmethods(f);
  }

  // Temporarily mark nmethods that are claimed to be on the non-perm list.
  void CodeCache::mark_scavenge_root_nmethods() {
-   FOR_ALL_ALIVE_BLOBS(cb) {
-     if (cb->is_nmethod()) {
-       nmethod *nm = (nmethod*)cb;
+   FOR_ALL_METHOD_HEAPS(it) {
+     FOR_ALL_ALIVE_BLOBS(cb, *it) {
+       nmethod* nm = (nmethod*)cb;
        assert(nm->scavenge_root_not_marked(), "clean state");
        if (nm->on_scavenge_root_list())
          nm->set_scavenge_root_marked();
      }
    }
  }

  // If the closure is given, run it on the unlisted nmethods.
  // Also make sure that the effects of mark_scavenge_root_nmethods is gone.
  void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
-   FOR_ALL_ALIVE_BLOBS(cb) {
-     bool call_f = (f_or_null != NULL);
-     if (cb->is_nmethod()) {
-       nmethod *nm = (nmethod*)cb;
-       assert(nm->scavenge_root_not_marked(), "must be already processed");
-       if (nm->on_scavenge_root_list())
-         call_f = false;  // don't show this one to the client
-       nm->verify_scavenge_root_oops();
-     } else {
-       call_f = false;   // not an nmethod
-     }
-     if (call_f)  f_or_null->do_code_blob(cb);
+   FOR_ALL_METHOD_HEAPS(it) {
+     FOR_ALL_ALIVE_BLOBS(cb, *it) {
+       nmethod* nm = (nmethod*)cb;
+       bool call_f = (f_or_null != NULL);
+       assert(nm->scavenge_root_not_marked(), "must be already processed");
+       if (nm->on_scavenge_root_list())
+         call_f = false;  // don't show this one to the client
+       nm->verify_scavenge_root_oops();
+       if (call_f) f_or_null->do_code_blob(nm);
+     }
    }
  }
  #endif //PRODUCT

  void CodeCache::gc_prologue() {
    assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
  }

  void CodeCache::gc_epilogue() {
    assert_locked_or_safepoint(CodeCache_lock);
-   FOR_ALL_ALIVE_BLOBS(cb) {
-     if (cb->is_nmethod()) {
-       nmethod *nm = (nmethod*)cb;
+   FOR_ALL_METHOD_HEAPS(it) {
+     FOR_ALL_ALIVE_BLOBS(cb, *it) {
+       nmethod* nm = (nmethod*)cb;
        assert(!nm->is_unloaded(), "Tautology");
        if (needs_cache_clean()) {
          nm->cleanup_inline_caches();
        }
        DEBUG_ONLY(nm->verify());
*** 486,497 **** --- 627,638 ----
    assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");

  #ifdef ASSERT
    // make sure that we aren't leaking icholders
    int count = 0;
-   FOR_ALL_BLOBS(cb) {
-     if (cb->is_nmethod()) {
+   FOR_ALL_METHOD_HEAPS(it) {
+     FOR_ALL_BLOBS(cb, *it) {
        RelocIterator iter((nmethod*)cb);
        while(iter.next()) {
          if (iter.type() == relocInfo::virtual_call_type) {
            if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
              CompiledIC *ic = CompiledIC_at(iter.reloc());
*** 510,551 **** --- 651,707 ----
    assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
           CompiledICHolder::live_count(), "must agree");
  #endif
  }

  void CodeCache::verify_oops() {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    VerifyOopClosure voc;
-   FOR_ALL_ALIVE_BLOBS(cb) {
-     if (cb->is_nmethod()) {
-       nmethod *nm = (nmethod*)cb;
+   FOR_ALL_METHOD_HEAPS(it) {
+     FOR_ALL_ALIVE_BLOBS(cb, *it) {
+       nmethod* nm = (nmethod*)cb;
        nm->oops_do(&voc);
        nm->verify_oop_relocations();
      }
    }
  }

- address CodeCache::first_address() {
-   assert_locked_or_safepoint(CodeCache_lock);
-   return (address)_heap->low_boundary();
+ size_t CodeCache::capacity() {
+   size_t cap = 0;
+   FOR_ALL_HEAPS(it) {
+     cap += (*it)->capacity();
+   }
+   return cap;
  }

- address CodeCache::last_address() {
-   assert_locked_or_safepoint(CodeCache_lock);
-   return (address)_heap->high();
+ size_t CodeCache::unallocated_capacity() {
+   size_t unallocated_cap = 0;
+   FOR_ALL_HEAPS(it) {
+     unallocated_cap += (*it)->unallocated_capacity();
+   }
+   return unallocated_cap;
+ }
+
+ size_t CodeCache::max_capacity() {
+   size_t max_cap = 0;
+   FOR_ALL_HEAPS(it) {
+     max_cap += (*it)->max_capacity();
+   }
+   return max_cap;
  }

  /**
   * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
   * is free, reverse_free_ratio() returns 4.
   */
- double CodeCache::reverse_free_ratio() {
-   double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
-   double max_capacity = (double)CodeCache::max_capacity();
+ double CodeCache::reverse_free_ratio(int code_blob_type) {
+   CodeHeap* heap = get_code_heap(code_blob_type);
+   if (heap == NULL) {
+     return 0;
+   }
+   double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
+   double max_capacity = (double)heap->max_capacity();
    return max_capacity / unallocated_capacity;
  }

  void icache_init();
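
To make the reverse_free_ratio() doc comment above concrete: a minimal worked example with an assumed 64 MB heap, treating CodeCacheMinimumFreeSpace as 0 to keep the arithmetic obvious:

    #include <cstdio>

    int main() {
      double max_capacity = 64.0 * 1024 * 1024;  // assumed 64 MB CodeHeap
      double unallocated  = max_capacity / 4;    // 25% (1/4) of the heap is free
      // Same division as reverse_free_ratio(): max / free.
      double reverse_free_ratio = max_capacity / unallocated;
      printf("reverse free ratio = %.1f\n", reverse_free_ratio);  // prints 4.0
      return 0;
    }
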
*** 557,602 **** --- 713,746 ----
    assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");

    // This was originally just a check of the alignment, causing failure, instead, round
    // the code cache to the page size.  In particular, Solaris is moving to a larger
    // default page size.
    CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
    InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
    ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
-   if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
-     vm_exit_during_initialization("Could not reserve enough space for code cache");
-   }
-
-   MemoryService::add_code_heap_memory_pool(_heap);
+
+   // Reserve space and create heaps
+   initialize_heaps();

    // Initialize ICache flush mechanism
    // This service is needed for os::register_code_area
    icache_init();

    // Give OS a chance to register generated code area.
    // This is used on Windows 64 bit platforms to register
    // Structured Exception Handlers for our generated code.
-   os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
+   os::register_code_area((char*)low_bound(), (char*)high_bound());
  }

  void codeCache_init() {
    CodeCache::initialize();
  }

  //------------------------------------------------------------------------------------------------

  int CodeCache::number_of_nmethods_with_dependencies() {
    return _number_of_nmethods_with_dependencies;
  }

  void CodeCache::clear_inline_caches() {
    assert_locked_or_safepoint(CodeCache_lock);
-   FOR_ALL_ALIVE_NMETHODS(nm) {
-     nm->clear_inline_caches();
+   FOR_ALL_METHOD_HEAPS(it) {
+     FOR_ALL_ALIVE_BLOBS(cb, *it) {
+       ((nmethod*)cb)->clear_inline_caches();
+     }
    }
  }

  #ifndef PRODUCT
  // used to keep track of how much time is spent in mark_for_deoptimization
  static elapsedTimer dependentCheckTime;
  static int dependentCheckCount = 0;
  #endif // PRODUCT
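
initialize() rounds the cache sizes up to the OS page size before any memory is reserved. A minimal sketch of that align-up arithmetic, assuming a 4096-byte page (HotSpot queries os::vm_page_size() at runtime instead of hard-coding it):

    #include <cstddef>
    #include <cstdio>

    // Round size up to the next multiple of page_size (a power of two),
    // mirroring what round_to() does in the code above.
    static size_t round_to_page(size_t size, size_t page_size) {
      return (size + page_size - 1) & ~(page_size - 1);
    }

    int main() {
      const size_t page = 4096;  // assumed page size
      printf("%zu -> %zu\n", (size_t)10000, round_to_page(10000, page));  // 12288
      printf("%zu -> %zu\n", (size_t)8192,  round_to_page(8192,  page));  // 8192 (already aligned)
      return 0;
    }
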
*** 626,646 **** --- 770,793 ----
    }

    if (VerifyDependencies) {
      // Turn off dependency tracing while actually testing deps.
      NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
-     FOR_ALL_ALIVE_NMETHODS(nm) {
+     FOR_ALL_METHOD_HEAPS(it) {
+       FOR_ALL_ALIVE_BLOBS(cb, *it) {
+         nmethod* nm = (nmethod*)cb;
          if (!nm->is_marked_for_deoptimization() &&
              nm->check_all_dependencies()) {
            ResourceMark rm;
            tty->print_cr("Should have been marked for deoptimization:");
            changes.print();
            nm->print();
            nm->print_dependencies();
          }
        }
+     }
    }

  #ifndef PRODUCT
    dependentCheckTime.stop();
  #endif // PRODUCT
*** 663,673 **** --- 810,822 ----
        nm->mark_for_deoptimization();
        number_of_marked_CodeBlobs++;
      }
    }

-   FOR_ALL_ALIVE_NMETHODS(nm) {
+   FOR_ALL_METHOD_HEAPS(it) {
+     FOR_ALL_ALIVE_BLOBS(cb, *it) {
+       nmethod* nm = (nmethod*)cb;
        if (nm->is_marked_for_deoptimization()) {
          // ...Already marked in the previous pass; don't count it again.
        } else if (nm->is_evol_dependent_on(dependee())) {
          ResourceMark rm;
          nm->mark_for_deoptimization();
*** 675,717 **** --- 824,874 ----
          number_of_marked_CodeBlobs++;
        } else {
          // flush caches in case they refer to a redefined Method*
          nm->clear_inline_caches();
        }
      }
+   }

    return number_of_marked_CodeBlobs;
  }
  #endif // HOTSWAP

  // Deoptimize all methods
  void CodeCache::mark_all_nmethods_for_deoptimization() {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-   FOR_ALL_ALIVE_NMETHODS(nm) {
+   FOR_ALL_METHOD_HEAPS(it) {
+     FOR_ALL_ALIVE_BLOBS(cb, *it) {
+       nmethod* nm = (nmethod*)cb;
        nm->mark_for_deoptimization();
      }
+   }
  }

  int CodeCache::mark_for_deoptimization(Method* dependee) {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    int number_of_marked_CodeBlobs = 0;

-   FOR_ALL_ALIVE_NMETHODS(nm) {
+   FOR_ALL_METHOD_HEAPS(it) {
+     FOR_ALL_ALIVE_BLOBS(cb, *it) {
+       nmethod* nm = (nmethod*)cb;
        if (nm->is_dependent_on_method(dependee)) {
          ResourceMark rm;
          nm->mark_for_deoptimization();
          number_of_marked_CodeBlobs++;
        }
      }
+   }

    return number_of_marked_CodeBlobs;
  }

  void CodeCache::make_marked_nmethods_zombies() {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
-   FOR_ALL_ALIVE_NMETHODS(nm) {
+   FOR_ALL_METHOD_HEAPS(it) {
+     FOR_ALL_ALIVE_BLOBS(cb, *it) {
+       nmethod* nm = (nmethod*)cb;
        if (nm->is_marked_for_deoptimization()) {

          // If the nmethod has already been made non-entrant and it can be converted
          // then zombie it now. Otherwise make it non-entrant and it will eventually
          // be zombied when it is no longer seen on the stack. Note that the nmethod
*** 724,762 **** --- 881,952 ----
        } else {
          nm->make_not_entrant();
        }
      }
    }
+   }
  }

  void CodeCache::make_marked_nmethods_not_entrant() {
    assert_locked_or_safepoint(CodeCache_lock);
-   FOR_ALL_ALIVE_NMETHODS(nm) {
+   FOR_ALL_METHOD_HEAPS(it) {
+     FOR_ALL_ALIVE_BLOBS(cb, *it) {
+       nmethod* nm = (nmethod*)cb;
        if (nm->is_marked_for_deoptimization()) {
          nm->make_not_entrant();
        }
      }
+   }
  }

  void CodeCache::verify() {
-   _heap->verify();
-   FOR_ALL_ALIVE_BLOBS(p) {
-     p->verify();
+   assert_locked_or_safepoint(CodeCache_lock);
+   FOR_ALL_HEAPS(it) {
+     CodeHeap* heap = *it;
+     heap->verify();
+     FOR_ALL_BLOBS(cb, heap) {
+       if (cb->is_alive()) {
+         cb->verify();
+       }
+     }
    }
  }

- void CodeCache::report_codemem_full() {
+ // A CodeHeap is full. Print out warning and report event.
+ void CodeCache::report_codemem_full(int code_blob_type) {
+   // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
+   CodeHeap* heap = get_code_heap(code_blob_type);
+
+   if (!heap->was_full()) {
+     // Not yet reported for this heap, report
+     heap->report_full();
+     warning("CodeHeap for %s is full. Compiler has been disabled.", CodeCache::get_heap_name(code_blob_type));
+     warning("Try increasing the code heap size using -XX:%s=",
+             (code_blob_type == CodeBlobType::MethodNonProfiled) ? "NonProfiledCodeHeapSize" : "ProfiledCodeHeapSize");
+
+     ResourceMark rm;
+     stringStream s;
+     // Dump CodeCache summary into a buffer before locking the tty
+     {
+       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+       print_summary(&s, true);
+     }
+     ttyLocker ttyl;
+     tty->print(s.as_string());
+   }
+
    _codemem_full_count++;
    EventCodeCacheFull event;
    if (event.should_commit()) {
-     event.set_startAddress((u8)low_bound());
-     event.set_commitedTopAddress((u8)high());
-     event.set_reservedTopAddress((u8)high_bound());
+     event.set_codeBlobType(code_blob_type);
+     event.set_startAddress((u8)heap->low_boundary());
+     event.set_commitedTopAddress((u8)heap->high());
+     event.set_reservedTopAddress((u8)heap->high_boundary());
      event.set_entryCount(nof_blobs());
      event.set_methodCount(nof_nmethods());
      event.set_adaptorCount(nof_adapters());
-     event.set_unallocatedCapacity(unallocated_capacity()/K);
+     event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
      event.set_fullCount(_codemem_full_count);
      event.commit();
    }
  }
*** 765,775 **** --- 955,967 ----
  #ifndef PRODUCT

  void CodeCache::verify_if_often() {
    if (VerifyCodeCacheOften) {
-     _heap->verify();
+     FOR_ALL_HEAPS(it) {
+       (*it)->verify();
+     }
    }
  }

  void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
    if (PrintCodeCache2) {  // Need to add a new flag
*** 794,813 **** --- 986,1009 ----
    int nmethodJava = 0;
    int nmethodNative = 0;
    int maxCodeSize = 0;
    ResourceMark rm;

-   CodeBlob *cb;
-   for (cb = first(); cb != NULL; cb = next(cb)) {
+   int i = 0;
+   FOR_ALL_HEAPS(it) {
+     if (Verbose) {
+       tty->print_cr("## Heap '%s' ##", (*it)->name());
+     }
+     FOR_ALL_BLOBS(cb, *it) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
-         tty->print("%s", method_name);
+         tty->print("%s %d", method_name, nm->comp_level());
          if(nm->is_alive()) { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
        }
*** 815,825 **** --- 1011,1021 ----
        if(nm->is_alive()) { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->is_zombie()) { nmethodZombie++; }
        if(nm->is_unloaded()) { nmethodUnloaded++; }
-       if(nm->is_native_method()) { nmethodNative++; }
+       if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          if (nm->insts_size() > maxCodeSize) {
            maxCodeSize = nm->insts_size();
*** 835,854 **** --- 1031,1051 ----
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
+   }

    int bucketSize = 512;
    int bucketLimit = maxCodeSize / bucketSize + 1;
    int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
    memset(buckets, 0, sizeof(int) * bucketLimit);

-   for (cb = first(); cb != NULL; cb = next(cb)) {
-     if (cb->is_nmethod()) {
+   FOR_ALL_METHOD_HEAPS(it) {
+     FOR_ALL_BLOBS(cb, *it) {
        nmethod* nm = (nmethod*)cb;
-       if(nm->is_java_method()) {
+       if(nm->method() != NULL && nm->is_java_method()) {
          buckets[nm->insts_size() / bucketSize]++;
        }
      }
    }

    tty->print_cr("Code Cache Entries (total of %d)",total);
*** 866,876 **** --- 1063,1073 ----
      tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
      tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);

      tty->print_cr("\nnmethod size distribution (non-zombie java)");
      tty->print_cr("-------------------------------------------------");

-     for(int i=0; i<bucketLimit; i++) {
+     for(int i = 0; i < bucketLimit; ++i) {
        if(buckets[i] != 0) {
          tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
          tty->fill_to(40);
          tty->print_cr("%d",buckets[i]);
        }
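
The histogram printed above bins each nmethod by insts_size() into 512-byte buckets. A minimal sketch of that binning with invented code sizes (none of these values come from the webrev):

    #include <cstdio>
    #include <cstring>

    int main() {
      const int bucketSize = 512;
      int sizes[] = { 100, 600, 700, 1500, 300 };  // assumed nmethod code sizes
      int maxCodeSize = 1500;
      int bucketLimit = maxCodeSize / bucketSize + 1;

      int buckets[16];
      memset(buckets, 0, sizeof(buckets));
      for (int i = 0; i < 5; i++) {
        buckets[sizes[i] / bucketSize]++;  // same binning as print_internals()
      }
      for (int i = 0; i < bucketLimit; i++) {
        if (buckets[i] != 0) {
          printf("%d - %d bytes: %d\n", i * bucketSize, (i + 1) * bucketSize, buckets[i]);
        }
      }
      return 0;
    }
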
*** 888,902 **** --- 1085,1101 ----
    if (!Verbose) return;

    CodeBlob_sizes live;
    CodeBlob_sizes dead;

-   FOR_ALL_BLOBS(p) {
-     if (!p->is_alive()) {
-       dead.add(p);
-     } else {
-       live.add(p);
+   FOR_ALL_HEAPS(it) {
+     FOR_ALL_BLOBS(cb, *it) {
+       if (!cb->is_alive()) {
+         dead.add(cb);
+       } else {
+         live.add(cb);
+       }
      }
    }

    tty->print_cr("CodeCache:");
*** 908,935 **** --- 1107,1135 ----
    }
    if (!dead.is_empty()) {
      dead.print("dead");
    }

    if (WizardMode) {
      // print the oop_map usage
      int code_size = 0;
      int number_of_blobs = 0;
      int number_of_oop_maps = 0;
      int map_size = 0;
-     FOR_ALL_BLOBS(p) {
-       if (p->is_alive()) {
+     FOR_ALL_HEAPS(it) {
+       FOR_ALL_BLOBS(cb, *it) {
+         if (cb->is_alive()) {
          number_of_blobs++;
-         code_size += p->code_size();
-         OopMapSet* set = p->oop_maps();
+         code_size += cb->code_size();
+         OopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->size();
            map_size           += set->heap_size();
          }
        }
      }
+     }
      tty->print_cr("OopMaps");
      tty->print_cr("  #blobs    = %d", number_of_blobs);
      tty->print_cr("  code size = %d", code_size);
      tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
      tty->print_cr("  map size  = %d", map_size);
*** 937,969 **** --- 1137,1174 ----
  #endif // !PRODUCT
  }

  void CodeCache::print_summary(outputStream* st, bool detailed) {
-   size_t total = (_heap->high_boundary() - _heap->low_boundary());
-   st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
-                "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
-                total/K, (total - unallocated_capacity())/K,
-                maxCodeCacheUsed/K, unallocated_capacity()/K);
+   st->print_cr("CodeCache Summary:");
+   FOR_ALL_HEAPS(it) {
+     CodeHeap* heap = (*it);
+     size_t total = (heap->high_boundary() - heap->low_boundary());
+     st->print_cr("Heap '%s': size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
+                  "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
+                  heap->name(), total/K, (total - heap->unallocated_capacity())/K,
+                  heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

-   if (detailed) {
-     st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
-                  _heap->low_boundary(),
-                  _heap->high(),
-                  _heap->high_boundary());
-     st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
-                  " adapters=" UINT32_FORMAT,
-                  nof_blobs(), nof_nmethods(), nof_adapters());
+     if (detailed) {
+       st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
+                    heap->low_boundary(),
+                    heap->high(),
+                    heap->high_boundary());
+     }
+   }
+
+   if (detailed) {
+     log_state(st);
      st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                   "enabled" : Arguments::mode() == Arguments::_int ?
                   "disabled (interpreter mode)" :
                   "disabled (not enough contiguous free space left)");
    }
  }

  void CodeCache::log_state(outputStream* st) {
    st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
-             " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
-             nof_blobs(), nof_nmethods(), nof_adapters(),
-             unallocated_capacity());
+             " adapters='" UINT32_FORMAT "'",
+             nof_blobs(), nof_nmethods(), nof_adapters());
  }
