src/share/vm/code/codeCache.cpp

*** 42,51 **** --- 42,52 ----
  #include "runtime/handles.inline.hpp"
  #include "runtime/arguments.hpp"
  #include "runtime/icache.hpp"
  #include "runtime/java.hpp"
  #include "runtime/mutexLocker.hpp"
+ #include "runtime/sweeper.hpp"
  #include "runtime/compilationPolicy.hpp"
  #include "services/memoryService.hpp"
  #include "trace/tracing.hpp"
  #include "utilities/xmlstream.hpp"
  #ifdef COMPILER1
*** 190,200 ****
      FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
    }
  
    // Make sure we have enough space for VM internal code
!   uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
    if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
      vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
    }
  
    guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
--- 191,201 ----
      FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
    }
  
    // Make sure we have enough space for VM internal code
!   uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
    if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
      vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
    }
  
    guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
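This hunk drops CodeCacheMinimumFreeSpace from the minimum-size computation; the division-by-zero concern it used to guard is handled in reverse_free_ratio() below. A minimal standalone sketch of the resulting check follows; the flag values and the check_non_nmethod_heap helper are assumed stand-ins for illustration, not HotSpot's real flag machinery, and DEBUG_ONLY(* 3) is modeled with #ifdef ASSERT as in HotSpot's debug builds.

#include <cstdio>
#include <cstdlib>

typedef unsigned int uint;

static const uint K = 1024;
// Hypothetical stand-in values; the real flags are set ergonomically by the VM.
static uint CodeCacheMinimumUseSpace = 400 * K;
static uint NonNMethodCodeHeapSize   = 5 * K * K;

static void check_non_nmethod_heap(uint code_buffers_size) {
#ifdef ASSERT
  // Debug builds reserve three times the minimum to leave room for extra VM stubs.
  uint min_code_cache_size = CodeCacheMinimumUseSpace * 3;
#else
  uint min_code_cache_size = CodeCacheMinimumUseSpace;
#endif
  if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
    fprintf(stderr, "Not enough space in non-nmethod code heap to run VM.\n");
    exit(1);
  }
}

int main() {
  check_non_nmethod_heap(64 * K);  // passes with the assumed sizes above
  return 0;
}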
*** 331,366 ****
  CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
    return next_blob(get_code_heap(cb), cb);
  }
  
! CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
!   // Do not seize the CodeCache lock here--if the caller has not
!   // already done so, we are going to lose bigtime, since the code
!   // cache will contain a garbage CodeBlob until the caller can
!   // run the constructor for the CodeBlob subclass he is busy
!   // instantiating.
    assert_locked_or_safepoint(CodeCache_lock);
!   assert(size > 0, "allocation request must be reasonable");
    if (size <= 0) {
      return NULL;
    }
    CodeBlob* cb = NULL;
  
    // Get CodeHeap for the given CodeBlobType
    CodeHeap* heap = get_code_heap(code_blob_type);
    assert(heap != NULL, "heap is null");
  
    while (true) {
!     cb = (CodeBlob*)heap->allocate(size, is_critical);
      if (cb != NULL) break;
      if (!heap->expand_by(CodeCacheExpansionSize)) {
        // Expansion failed
        if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
!         // Fallback solution: Store non-nmethod code in the non-profiled code heap
!         return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
        }
        return NULL;
      }
      if (PrintCodeCacheExtension) {
        ResourceMark rm;
        if (SegmentedCodeCache) {
--- 332,375 ----
  CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
    return next_blob(get_code_heap(cb), cb);
  }
  
! /**
!  * Do not seize the CodeCache lock here--if the caller has not
!  * already done so, we are going to lose bigtime, since the code
!  * cache will contain a garbage CodeBlob until the caller can
!  * run the constructor for the CodeBlob subclass he is busy
!  * instantiating.
!  */
! CodeBlob* CodeCache::allocate(int size, int code_blob_type) {
!   // Possibly wakes up the sweeper thread.
!   NMethodSweeper::notify(code_blob_type);
    assert_locked_or_safepoint(CodeCache_lock);
!   assert(size > 0, err_msg_res("Code cache allocation request must be > 0 but is %d", size));
    if (size <= 0) {
      return NULL;
    }
    CodeBlob* cb = NULL;
  
    // Get CodeHeap for the given CodeBlobType
    CodeHeap* heap = get_code_heap(code_blob_type);
    assert(heap != NULL, "heap is null");
  
    while (true) {
!     cb = (CodeBlob*)heap->allocate(size);
      if (cb != NULL) break;
      if (!heap->expand_by(CodeCacheExpansionSize)) {
        // Expansion failed
        if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
!         // Fallback solution: Store non-nmethod code in the non-profiled code heap.
!         // Note that in the sweeper we check the reverse_free_ratio of the non-profiled
!         // code heap and force stack scanning if less than 10% of the code heap is free.
!         return allocate(size, CodeBlobType::MethodNonProfiled);
        }
+       MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+       CompileBroker::handle_full_code_cache(code_blob_type);
        return NULL;
      }
      if (PrintCodeCacheExtension) {
        ResourceMark rm;
        if (SegmentedCodeCache) {
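For readers skimming the hunk, the new control flow is: try to allocate, expand the heap on failure, fall back from the non-nmethod heap to the non-profiled heap if expansion is impossible, and only then report a full code cache. A self-contained toy sketch of that loop follows; ToyHeap and toy_allocate are hypothetical stand-ins for HotSpot's CodeHeap and CodeCache::allocate, with locking and sweeper notification omitted.

#include <cstddef>
#include <cstdlib>

struct ToyHeap {
  size_t capacity;  // bytes currently committed
  size_t used;      // bytes handed out
  size_t max;       // hard limit, like a per-heap ReservedCodeCacheSize

  void* allocate(size_t size) {
    if (used + size > capacity) return NULL;  // heap full
    used += size;
    return malloc(size);                      // stand-in for carving out a block
  }
  bool expand_by(size_t bytes) {
    if (capacity + bytes > max) return false; // cannot grow further
    capacity += bytes;
    return true;
  }
};

void* toy_allocate(ToyHeap* heap, ToyHeap* fallback, size_t size, size_t step) {
  for (;;) {
    void* p = heap->allocate(size);
    if (p != NULL) return p;                  // success
    if (!heap->expand_by(step)) {
      // Expansion failed: mirror the non-nmethod -> non-profiled fallback,
      // then give up (the VM would notify the CompileBroker at this point).
      if (fallback != NULL) return toy_allocate(fallback, NULL, size, step);
      return NULL;
    }
  }
}

int main() {
  ToyHeap non_nmethod  = { 0, 0, 4 * 1024 };  // tiny limits for the demo
  ToyHeap non_profiled = { 0, 0, 64 * 1024 };
  void* p = toy_allocate(&non_nmethod, &non_profiled, 8 * 1024, 1024);
  return p != NULL ? 0 : 1;                   // succeeds via the fallback heap
}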
*** 754,787 ****
    }
    return max_cap;
  }
  
  /**
-  * Returns true if a CodeHeap is full and sets code_blob_type accordingly.
-  */
- bool CodeCache::is_full(int* code_blob_type) {
-   FOR_ALL_HEAPS(heap) {
-     if ((*heap)->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
-       *code_blob_type = (*heap)->code_blob_type();
-       return true;
-     }
-   }
-   return false;
- }
- 
- /**
   * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
   * is free, reverse_free_ratio() returns 4.
   */
  double CodeCache::reverse_free_ratio(int code_blob_type) {
    CodeHeap* heap = get_code_heap(code_blob_type);
    if (heap == NULL) {
      return 0;
    }
!   double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
    double max_capacity = (double)heap->max_capacity();
!   return max_capacity / unallocated_capacity;
  }
  
  size_t CodeCache::bytes_allocated_in_freelists() {
    size_t allocated_bytes = 0;
    FOR_ALL_HEAPS(heap) {
--- 763,787 ----
    }
    return max_cap;
  }
  
  /**
   * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
   * is free, reverse_free_ratio() returns 4.
   */
  double CodeCache::reverse_free_ratio(int code_blob_type) {
    CodeHeap* heap = get_code_heap(code_blob_type);
    if (heap == NULL) {
      return 0;
    }
! 
!   double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0
    double max_capacity = (double)heap->max_capacity();
!   double result = max_capacity / unallocated_capacity;
!   assert(max_capacity >= unallocated_capacity, "Must be");
!   assert(result >= 1.0, err_msg_res("reverse_free_ratio must be at least 1. It is %f", result));
!   return result;
  }
  
  size_t CodeCache::bytes_allocated_in_freelists() {
    size_t allocated_bytes = 0;
    FOR_ALL_HEAPS(heap) {
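With CodeCacheMinimumFreeSpace gone, reverse_free_ratio() now clamps the free space to one byte instead of subtracting a reserve, so a completely full heap yields a large finite ratio rather than a division by zero (or a negative ratio). A standalone sketch of the clamped computation follows; it takes raw byte counts instead of a CodeHeap*, and MAX2 is re-declared here so it builds as plain C++ outside HotSpot.

#include <cassert>
#include <cstdio>

static inline double MAX2(double a, double b) { return a > b ? a : b; }

// Sketch of the clamped computation in the new hunk above.
double reverse_free_ratio(double unallocated_capacity, double max_capacity) {
  double unallocated = MAX2(unallocated_capacity, 1.0); // avoid division by 0
  double result = max_capacity / unallocated;
  assert(result >= 1.0);  // holds whenever max_capacity >= unallocated
  return result;
}

int main() {
  // 25% of a 240 MB heap free -> ratio 4, matching the doc comment above.
  double mb = 1024.0 * 1024.0;
  printf("%f\n", reverse_free_ratio(60 * mb, 240 * mb)); // prints 4.000000
  return 0;
}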