src/share/vm/code/codeCache.cpp

*** 42,51 ****
--- 42,52 ----
  #include "runtime/handles.inline.hpp"
  #include "runtime/arguments.hpp"
  #include "runtime/icache.hpp"
  #include "runtime/java.hpp"
  #include "runtime/mutexLocker.hpp"
+ #include "runtime/sweeper.hpp"
  #include "runtime/compilationPolicy.hpp"
  #include "services/memoryService.hpp"
  #include "trace/tracing.hpp"
  #include "utilities/xmlstream.hpp"
  #ifdef COMPILER1
*** 190,200 ****
      FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
    }
  
    // Make sure we have enough space for VM internal code
!   uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
    if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
      vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
    }
  
    guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
--- 191,201 ----
      FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
    }
  
    // Make sure we have enough space for VM internal code
!   uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
    if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
      vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
    }
  
    guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
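Note on the sizing change above: the minimum non-nmethod heap size no longer reserves CodeCacheMinimumFreeSpace on top of CodeCacheMinimumUseSpace. A minimal standalone sketch of the revised check follows; the flag values are made-up assumptions for illustration, not HotSpot defaults.

    // Standalone model of the revised check: CodeCacheMinimumFreeSpace no
    // longer contributes to the required minimum. All values are assumptions
    // chosen for illustration only.
    #include <cstdio>

    int main() {
      const unsigned K = 1024;
      unsigned CodeCacheMinimumUseSpace = 400 * K;      // assumed flag value
      unsigned NonNMethodCodeHeapSize   = 5 * 1024 * K; // assumed flag value
      unsigned code_buffers_size        = 1024 * K;     // assumed buffer total
    #ifdef ASSERT
      unsigned min_code_cache_size = CodeCacheMinimumUseSpace * 3; // debug builds need extra room
    #else
      unsigned min_code_cache_size = CodeCacheMinimumUseSpace;
    #endif
      if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
        std::printf("Not enough space in non-nmethod code heap to run VM.\n");
        return 1;
      }
      std::printf("non-nmethod code heap size is sufficient\n");
      return 0;
    }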
*** 331,365 ****
  CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
    return next_blob(get_code_heap(cb), cb);
  }
  
! CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
!   // Do not seize the CodeCache lock here--if the caller has not
!   // already done so, we are going to lose bigtime, since the code
!   // cache will contain a garbage CodeBlob until the caller can
!   // run the constructor for the CodeBlob subclass he is busy
!   // instantiating.
    assert_locked_or_safepoint(CodeCache_lock);
!   assert(size > 0, "allocation request must be reasonable");
    if (size <= 0) {
      return NULL;
    }
    CodeBlob* cb = NULL;
  
    // Get CodeHeap for the given CodeBlobType
    CodeHeap* heap = get_code_heap(code_blob_type);
    assert(heap != NULL, "heap is null");
  
    while (true) {
!     cb = (CodeBlob*)heap->allocate(size, is_critical);
      if (cb != NULL) break;
      if (!heap->expand_by(CodeCacheExpansionSize)) {
        // Expansion failed
        if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
          // Fallback solution: Store non-nmethod code in the non-profiled code heap
!         return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
        }
        return NULL;
      }
  
      if (PrintCodeCacheExtension) {
        ResourceMark rm;
--- 332,372 ----
  CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
    return next_blob(get_code_heap(cb), cb);
  }
  
! /**
!  * Do not seize the CodeCache lock here--if the caller has not
!  * already done so, we are going to lose bigtime, since the code
!  * cache will contain a garbage CodeBlob until the caller can
!  * run the constructor for the CodeBlob subclass he is busy
!  * instantiating.
!  */
! CodeBlob* CodeCache::allocate(int size, int code_blob_type) {
!   NMethodSweeper::notify();
    assert_locked_or_safepoint(CodeCache_lock);
!   assert(size > 0, "Code cache allocation request must be > 0");
    if (size <= 0) {
      return NULL;
    }
    CodeBlob* cb = NULL;
  
    // Get CodeHeap for the given CodeBlobType
    CodeHeap* heap = get_code_heap(code_blob_type);
    assert(heap != NULL, "heap is null");
  
    while (true) {
!     cb = (CodeBlob*)heap->allocate(size);
      if (cb != NULL) break;
      if (!heap->expand_by(CodeCacheExpansionSize)) {
+       MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+       CompileBroker::handle_full_code_cache(code_blob_type);
+ 
        // Expansion failed
        if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
          // Fallback solution: Store non-nmethod code in the non-profiled code heap
!         return allocate(size, CodeBlobType::MethodNonProfiled);
        }
        return NULL;
      }
  
      if (PrintCodeCacheExtension) {
        ResourceMark rm;
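The reshuffled allocation path above (notify the sweeper, try the heap, expand on failure, report a full code cache, then fall back to the non-profiled heap for non-nmethod code) can be hard to follow in diff form. Below is a simplified standalone model of that retry loop; the types, sizes, and limits are invented for the sketch and are not the HotSpot API.

    // Simplified model of the retry loop in CodeCache::allocate(): attempt the
    // allocation, expand the heap when it fails, and fall back to the
    // non-profiled heap for non-nmethod code once expansion is exhausted.
    // Everything here is illustrative; it is not the HotSpot implementation.
    #include <cstdio>
    #include <map>

    enum BlobType { NonNMethod, MethodNonProfiled };

    struct ModelHeap {
      int capacity;
      int used;
      void* allocate(int size) {
        if (used + size > capacity) return nullptr;
        used += size;
        return &used;                       // dummy non-null result
      }
      bool expand_by(int amount) {          // pretend expansion has a hard limit
        if (capacity + amount > 4096) return false;
        capacity += amount;
        return true;
      }
    };

    std::map<int, ModelHeap> heaps = { {NonNMethod,        {1024, 0}},
                                       {MethodNonProfiled, {2048, 0}} };

    void* model_allocate(int size, int type) {
      ModelHeap& heap = heaps[type];
      while (true) {
        if (void* p = heap.allocate(size)) return p;
        if (!heap.expand_by(512)) {
          // Expansion failed: a real VM would report "code cache full" here.
          if (type == NonNMethod) {
            // Fallback: store non-nmethod code in the non-profiled heap.
            return model_allocate(size, MethodNonProfiled);
          }
          return nullptr;
        }
      }
    }

    int main() {
      void* p = model_allocate(3000, NonNMethod);
      std::printf("allocation %s\n", p != nullptr ? "succeeded" : "failed");
      return 0;
    }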
*** 754,785 ****
    }
    return max_cap;
  }
  
  /**
-  * Returns true if a CodeHeap is full and sets code_blob_type accordingly.
-  */
- bool CodeCache::is_full(int* code_blob_type) {
-   FOR_ALL_HEAPS(heap) {
-     if ((*heap)->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
-       *code_blob_type = (*heap)->code_blob_type();
-       return true;
-     }
-   }
-   return false;
- }
- 
- /**
   * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
   * is free, reverse_free_ratio() returns 4.
   */
  double CodeCache::reverse_free_ratio(int code_blob_type) {
    CodeHeap* heap = get_code_heap(code_blob_type);
    if (heap == NULL) {
      return 0;
    }
!   double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
    double max_capacity = (double)heap->max_capacity();
    return max_capacity / unallocated_capacity;
  }
  
  size_t CodeCache::bytes_allocated_in_freelists() {
--- 761,779 ----
    }
    return max_cap;
  }
  
  /**
   * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
   * is free, reverse_free_ratio() returns 4.
   */
  double CodeCache::reverse_free_ratio(int code_blob_type) {
    CodeHeap* heap = get_code_heap(code_blob_type);
    if (heap == NULL) {
      return 0;
    }
!   double unallocated_capacity = (double)CodeCache::unallocated_capacity() + 1; // Avoid division by 0
    double max_capacity = (double)heap->max_capacity();
    return max_capacity / unallocated_capacity;
  }
  
  size_t CodeCache::bytes_allocated_in_freelists() {
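Quick sanity check on the changed divisor in reverse_free_ratio(): the new code adds 1 to the unallocated capacity so the division can never hit zero, and the 25%-free example from the comment still yields roughly 4. The capacities below are made-up illustration values.

    // Worked example for reverse_free_ratio(): if 25% (1/4) of the capacity is
    // free, the ratio is ~4. The +1 mirrors the division-by-zero guard in the
    // new code; the sizes are made up for illustration.
    #include <cstdio>

    int main() {
      double max_capacity         = 240.0 * 1024 * 1024;  // assumed 240M code heap
      double unallocated_capacity =  60.0 * 1024 * 1024;  // 25% of it is free
      double ratio = max_capacity / (unallocated_capacity + 1);  // avoid division by 0
      std::printf("reverse free ratio = %.6f\n", ratio);         // ~4.0
      return 0;
    }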