27 #include "code/codeCache.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/dependencies.hpp"
30 #include "code/icBuffer.hpp"
31 #include "code/nmethod.hpp"
32 #include "code/pcDesc.hpp"
33 #include "compiler/compileBroker.hpp"
34 #include "gc_implementation/shared/markSweep.hpp"
35 #include "memory/allocation.inline.hpp"
36 #include "memory/gcLocker.hpp"
37 #include "memory/iterator.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "oops/method.hpp"
40 #include "oops/objArrayOop.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "runtime/handles.inline.hpp"
43 #include "runtime/arguments.hpp"
44 #include "runtime/icache.hpp"
45 #include "runtime/java.hpp"
46 #include "runtime/mutexLocker.hpp"
47 #include "runtime/compilationPolicy.hpp"
48 #include "services/memoryService.hpp"
49 #include "trace/tracing.hpp"
50 #include "utilities/xmlstream.hpp"
51 #ifdef COMPILER1
52 #include "c1/c1_Compilation.hpp"
53 #include "c1/c1_Compiler.hpp"
54 #endif
55 #ifdef COMPILER2
56 #include "opto/c2compiler.hpp"
57 #include "opto/compile.hpp"
58 #include "opto/node.hpp"
59 #endif
60
61 // Helper class for printing in CodeCache
62 class CodeBlob_sizes {
63 private:
64 int count;
65 int total_size;
66 int header_size;
175 } else {
176 // Use all space for the non-nmethod heap and set other heaps to minimal size
177 FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
178 FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
179 FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
180 }
181 }
182
183 // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
184 if(!heap_available(CodeBlobType::MethodProfiled)) {
185 FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
186 FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
187 }
188 // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
189 if(!heap_available(CodeBlobType::MethodNonProfiled)) {
190 FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
191 FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
192 }
193
194 // Make sure we have enough space for VM internal code
195 uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
196 if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
197 vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
198 }
199 guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
200
201 // Align reserved sizes of CodeHeaps
202 size_t non_method_size = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
203 size_t profiled_size = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
204 size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
205
206 // Compute initial sizes of CodeHeaps
207 size_t init_non_method_size = MIN2(InitialCodeCacheSize, non_method_size);
208 size_t init_profiled_size = MIN2(InitialCodeCacheSize, profiled_size);
209 size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);
210
211 // Reserve one continuous chunk of memory for CodeHeaps and split it into
212 // parts for the individual heaps. The memory layout looks like this:
213 // ---------- high -----------
214 // Non-profiled nmethods
215 // Profiled nmethods
316 }
317
318 CodeBlob* CodeCache::first_blob(int code_blob_type) {
319 if (heap_available(code_blob_type)) {
320 return first_blob(get_code_heap(code_blob_type));
321 } else {
322 return NULL;
323 }
324 }
325
// Returns the CodeBlob following 'cb' within 'heap', or NULL at the end.
// Caller must hold the CodeCache_lock or be at a safepoint so the heap
// layout cannot change during iteration.
CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}
331
332 CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
333 return next_blob(get_code_heap(cb), cb);
334 }
335
// Allocates 'size' bytes in the CodeHeap for the given CodeBlobType,
// growing the heap on demand. Returns NULL if the heap is (and stays) full.
CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass he is busy
  // instantiating.
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "allocation request must be reasonable");
  // Release-build safety net for the assert above.
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  // Retry the allocation, expanding the heap by CodeCacheExpansionSize
  // each round, until it either succeeds or the heap cannot grow further.
  while (true) {
    cb = (CodeBlob*)heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
        // Fallback solution: Store non-nmethod code in the non-profiled code heap
        return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
      }
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (SegmentedCodeCache) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  // Bookkeeping; protected by the CodeCache_lock/safepoint asserted above.
  _number_of_blobs++;
  return cb;
}
380
381 void CodeCache::free(CodeBlob* cb) {
739 return (heap != NULL) ? heap->unallocated_capacity() : 0;
740 }
741
742 size_t CodeCache::unallocated_capacity() {
743 size_t unallocated_cap = 0;
744 FOR_ALL_HEAPS(heap) {
745 unallocated_cap += (*heap)->unallocated_capacity();
746 }
747 return unallocated_cap;
748 }
749
750 size_t CodeCache::max_capacity() {
751 size_t max_cap = 0;
752 FOR_ALL_HEAPS(heap) {
753 max_cap += (*heap)->max_capacity();
754 }
755 return max_cap;
756 }
757
758 /**
759 * Returns true if a CodeHeap is full and sets code_blob_type accordingly.
760 */
761 bool CodeCache::is_full(int* code_blob_type) {
762 FOR_ALL_HEAPS(heap) {
763 if ((*heap)->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
764 *code_blob_type = (*heap)->code_blob_type();
765 return true;
766 }
767 }
768 return false;
769 }
770
771 /**
772 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
773 * is free, reverse_free_ratio() returns 4.
774 */
775 double CodeCache::reverse_free_ratio(int code_blob_type) {
776 CodeHeap* heap = get_code_heap(code_blob_type);
777 if (heap == NULL) {
778 return 0;
779 }
780 double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
781 double max_capacity = (double)heap->max_capacity();
782 return max_capacity / unallocated_capacity;
783 }
784
785 size_t CodeCache::bytes_allocated_in_freelists() {
786 size_t allocated_bytes = 0;
787 FOR_ALL_HEAPS(heap) {
788 allocated_bytes += (*heap)->allocated_in_freelist();
789 }
790 return allocated_bytes;
791 }
792
793 int CodeCache::allocated_segments() {
794 int number_of_segments = 0;
795 FOR_ALL_HEAPS(heap) {
796 number_of_segments += (*heap)->allocated_segments();
797 }
798 return number_of_segments;
799 }
800
801 size_t CodeCache::freelists_length() {
802 size_t length = 0;
|
27 #include "code/codeCache.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/dependencies.hpp"
30 #include "code/icBuffer.hpp"
31 #include "code/nmethod.hpp"
32 #include "code/pcDesc.hpp"
33 #include "compiler/compileBroker.hpp"
34 #include "gc_implementation/shared/markSweep.hpp"
35 #include "memory/allocation.inline.hpp"
36 #include "memory/gcLocker.hpp"
37 #include "memory/iterator.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "oops/method.hpp"
40 #include "oops/objArrayOop.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "runtime/handles.inline.hpp"
43 #include "runtime/arguments.hpp"
44 #include "runtime/icache.hpp"
45 #include "runtime/java.hpp"
46 #include "runtime/mutexLocker.hpp"
47 #include "runtime/sweeper.hpp"
48 #include "runtime/compilationPolicy.hpp"
49 #include "services/memoryService.hpp"
50 #include "trace/tracing.hpp"
51 #include "utilities/xmlstream.hpp"
52 #ifdef COMPILER1
53 #include "c1/c1_Compilation.hpp"
54 #include "c1/c1_Compiler.hpp"
55 #endif
56 #ifdef COMPILER2
57 #include "opto/c2compiler.hpp"
58 #include "opto/compile.hpp"
59 #include "opto/node.hpp"
60 #endif
61
62 // Helper class for printing in CodeCache
63 class CodeBlob_sizes {
64 private:
65 int count;
66 int total_size;
67 int header_size;
176 } else {
177 // Use all space for the non-nmethod heap and set other heaps to minimal size
178 FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
179 FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
180 FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
181 }
182 }
183
184 // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
185 if(!heap_available(CodeBlobType::MethodProfiled)) {
186 FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
187 FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
188 }
189 // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
190 if(!heap_available(CodeBlobType::MethodNonProfiled)) {
191 FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
192 FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
193 }
194
195 // Make sure we have enough space for VM internal code
196 uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
197 if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
198 vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
199 }
200 guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
201
202 // Align reserved sizes of CodeHeaps
203 size_t non_method_size = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
204 size_t profiled_size = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
205 size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
206
207 // Compute initial sizes of CodeHeaps
208 size_t init_non_method_size = MIN2(InitialCodeCacheSize, non_method_size);
209 size_t init_profiled_size = MIN2(InitialCodeCacheSize, profiled_size);
210 size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);
211
212 // Reserve one continuous chunk of memory for CodeHeaps and split it into
213 // parts for the individual heaps. The memory layout looks like this:
214 // ---------- high -----------
215 // Non-profiled nmethods
216 // Profiled nmethods
317 }
318
319 CodeBlob* CodeCache::first_blob(int code_blob_type) {
320 if (heap_available(code_blob_type)) {
321 return first_blob(get_code_heap(code_blob_type));
322 } else {
323 return NULL;
324 }
325 }
326
// Returns the CodeBlob following 'cb' within 'heap', or NULL at the end.
// Caller must hold the CodeCache_lock or be at a safepoint so the heap
// layout cannot change during iteration.
CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}
332
333 CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
334 return next_blob(get_code_heap(cb), cb);
335 }
336
/**
 * Allocates 'size' bytes in the CodeHeap for the given CodeBlobType,
 * growing the heap on demand; returns NULL if the heap stays full.
 *
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass he is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, err_msg_res("Code cache allocation request must be > 0 but is %d", size));
  // Release-build safety net for the assert above.
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  // Retry the allocation, expanding the heap by CodeCacheExpansionSize
  // each round, until it either succeeds or the heap cannot grow further.
  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
        // Fallback solution: Store non-nmethod code in the non-profiled code heap.
        // Note that in the sweeper, we check the reverse_free_ratio of the non-profiled
        // code heap and force stack scanning if less than 10% of the code heap is free.
        return allocate(size, CodeBlobType::MethodNonProfiled);
      }
      // Drop the CodeCache lock (no safepoint check) while reporting the
      // full code cache, so the broker does not run under this lock.
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (SegmentedCodeCache) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  // Bookkeeping; protected by the CodeCache_lock/safepoint asserted above.
  _number_of_blobs++;
  return cb;
}
389
390 void CodeCache::free(CodeBlob* cb) {
748 return (heap != NULL) ? heap->unallocated_capacity() : 0;
749 }
750
751 size_t CodeCache::unallocated_capacity() {
752 size_t unallocated_cap = 0;
753 FOR_ALL_HEAPS(heap) {
754 unallocated_cap += (*heap)->unallocated_capacity();
755 }
756 return unallocated_cap;
757 }
758
759 size_t CodeCache::max_capacity() {
760 size_t max_cap = 0;
761 FOR_ALL_HEAPS(heap) {
762 max_cap += (*heap)->max_capacity();
763 }
764 return max_cap;
765 }
766
767 /**
768 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
769 * is free, reverse_free_ratio() returns 4.
770 */
771 double CodeCache::reverse_free_ratio(int code_blob_type) {
772 CodeHeap* heap = get_code_heap(code_blob_type);
773 if (heap == NULL) {
774 return 0;
775 }
776
777 double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0;
778 double max_capacity = (double)heap->max_capacity();
779 double result = max_capacity / unallocated_capacity;
780 assert (max_capacity >= unallocated_capacity, "Must be");
781 assert (result >= 1.0, err_msg_res("reverse_free_ratio must be at least 1. It is %f", result));
782 return result;
783 }
784
785 size_t CodeCache::bytes_allocated_in_freelists() {
786 size_t allocated_bytes = 0;
787 FOR_ALL_HEAPS(heap) {
788 allocated_bytes += (*heap)->allocated_in_freelist();
789 }
790 return allocated_bytes;
791 }
792
793 int CodeCache::allocated_segments() {
794 int number_of_segments = 0;
795 FOR_ALL_HEAPS(heap) {
796 number_of_segments += (*heap)->allocated_segments();
797 }
798 return number_of_segments;
799 }
800
801 size_t CodeCache::freelists_length() {
802 size_t length = 0;
|