      scopes_pcs_size += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
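// The heap and blob iterators compose; the usual pattern (as in
// verify_icholder_relocations() below) is a nested walk over every
// blob in every heap:
//
//   FOR_ALL_HEAPS(heap) {
//     FOR_ALL_BLOBS(cb, *heap) {
//       ... // visit cb
//     }
//   }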

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
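// Note: the arrays are allocated with new(ResourceObj::C_HEAP, mtCode) and a
// 'true' second constructor argument, so both the GrowableArray headers and
// their backing storage live on the C heap (tagged mtCode for native memory
// tracking) rather than in a resource area; CodeBlobType::All is the initial
// capacity.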

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
    vm_exit_during_initialization(error, message);

// ...
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}
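// The assert above checks a conservation identity: every live CompiledICHolder
// should be accounted for either by a relocation in compiled code (count), by
// an entry still queued in the InlineCacheBuffer, or by a holder that has been
// created but not yet claimed. Anything outside those buckets is a leak.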

// Defer freeing of concurrently cleaned ExceptionCache entries until
// after a global handshake operation.
void CodeCache::release_exception_cache(ExceptionCache* entry) {
  if (SafepointSynchronize::is_at_safepoint()) {
    delete entry;
  } else {
    for (;;) {
      ExceptionCache* free_list_head = Atomic::load(&_exception_cache_purge_list);
      entry->set_purge_list_next(free_list_head);
      if (Atomic::cmpxchg(entry, &_exception_cache_purge_list, free_list_head) == free_list_head) {
        break;
      }
    }
  }
}
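// The retry loop above is a standard lock-free list push: read the current
// head, link the new entry in front of it, then publish it with a CAS on the
// head pointer. If another thread pushed first, the CAS fails (it returns a
// head other than the one we read) and the loop retries against the new head.
// At a safepoint no concurrent users of the entry can exist, so it is deleted
// immediately instead.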

// Delete exception caches that have been concurrently unlinked,
// followed by a global handshake operation.
void CodeCache::purge_exception_caches() {
  ExceptionCache* curr = _exception_cache_purge_list;
  while (curr != NULL) {
    ExceptionCache* next = curr->purge_list_next();
    delete curr;
    curr = next;
  }
  _exception_cache_purge_list = NULL;
}
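// The purge walk uses plain loads and a plain store to reset the list head,
// which is only safe if no release_exception_cache() push can race with it;
// the global handshake referred to above is what appears to provide that
// guarantee.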

void CodeCache::gc_prologue() { }

void CodeCache::gc_epilogue() {
  prune_scavenge_root_nmethods();
}
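// _scavenge_root_nmethods (declared above) links nmethods holding scavengable
// oops; pruning it after GC drops entries that no longer need to be scanned
// as roots.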


void CodeCache::do_unloading_nmethod_caches(bool class_unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  // Even if classes are not unloaded, there may have been some nmethods that are
  // unloaded because oops in them are no longer reachable.
  NOT_DEBUG(if (needs_cache_clean() || class_unloading_occurred)) {
    CompiledMethodIterator iter;
    while(iter.next_alive()) {
      CompiledMethod* cm = iter.method();
      assert(!cm->is_unloaded(), "Tautology");
      DEBUG_ONLY(if (needs_cache_clean() || class_unloading_occurred)) {