 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/gcLocker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

// ...

    total_size       += cb->size();
    header_size      += cb->header_size();
    relocation_size  += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size        += nm->insts_size();
      stub_size        += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size        += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
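// Iterate over the subset of CodeHeaps that can contain nmethods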
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

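// The code cache maintains three heap lists: _heaps tracks every CodeHeap,
// while _compiled_heaps and _nmethod_heaps track the subsets that can hold
// CompiledMethods and nmethods, letting the iterators above skip heaps that
// cannot contain the blobs of interest.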
// Initialize arrays of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);

void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
  // Prepare error message
  const char* error = "Invalid code heap sizes";
  err_msg message("NonNMethodCodeHeapSize (%zuK) + ProfiledCodeHeapSize (%zuK) + NonProfiledCodeHeapSize (%zuK) = %zuK",
                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);

  if (total_size > cache_size) {
    // Some code heap sizes were explicitly set: total_size must be <= cache_size
    message.append(" is greater than ReservedCodeCacheSize (%zuK).", cache_size/K);
    vm_exit_during_initialization(error, message);
  } else if (all_set && total_size != cache_size) {
    // All code heap sizes were explicitly set: total_size must equal cache_size
    message.append(" is not equal to ReservedCodeCacheSize (%zuK).", cache_size/K);
    vm_exit_during_initialization(error, message);
  }
}
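
// For example, running with -XX:ReservedCodeCacheSize=100m while explicitly
// setting -XX:NonNMethodCodeHeapSize=8m -XX:ProfiledCodeHeapSize=64m
// -XX:NonProfiledCodeHeapSize=64m aborts startup, since
// 8m + 64m + 64m = 136m exceeds the 100m reservation.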

void CodeCache::initialize_heaps() {
// ...
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch(code_blob_type) {
  case CodeBlobType::NonNMethod:
    return "NonNMethodCodeHeapSize";
    break;
  case CodeBlobType::MethodNonProfiled:
    return "NonProfiledCodeHeapSize";
    break;
  case CodeBlobType::MethodProfiled:
    return "ProfiledCodeHeapSize";
    break;
  }
  ShouldNotReachHere();
  return NULL;
}

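// Comparator used to keep the CodeHeap lists sorted by code blob type;
// ties are broken by heap address, giving a deterministic order.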
int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
  if (lhs->code_blob_type() == rhs->code_blob_type()) {
    return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
  } else {
    return lhs->code_blob_type() - rhs->code_blob_type();
  }
}

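// Register a CodeHeap in the sorted heap lists that match the kinds of
// blobs its code blob type can hold.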
void CodeCache::add_heap(CodeHeap* heap) {
  assert(!Universe::is_fully_initialized(), "late heap addition?");

  _heaps->insert_sorted<code_heap_compare>(heap);

  int type = heap->code_blob_type();
  if (code_blob_type_accepts_compiled(type)) {
    _compiled_heaps->insert_sorted<code_heap_compare>(heap);
  }
  if (code_blob_type_accepts_nmethod(type)) {
    _nmethod_heaps->insert_sorted<code_heap_compare>(heap);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  add_heap(heap);

  // Reserve Space
  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
  size_initial = round_to(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(cb->code_begin())) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass he is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

// ...

          // Avoid loop if we already tried that code heap
          if (type == orig_code_blob_type) {
            type = CodeBlobType::MethodNonProfiled;
          }
          break;
        }
        if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
          if (PrintCodeCacheExtension) {
            tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
                          heap->name(), get_code_heap(type)->name());
          }
          return allocate(size, type, orig_code_blob_type);
        }
      }
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(orig_code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (_nmethod_heaps->length() >= 1) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeHeap* heap = get_code_heap(cb);
  print_trace("free", cb);
  if (cb->is_nmethod()) {
    heap->set_nmethod_count(heap->nmethod_count() - 1);
    if (((nmethod *)cb)->has_dependencies()) {
      // ...
    heap->set_adapter_count(heap->adapter_count() + 1);
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // S390 uses contains() in current_frame(), which is used before
  // code cache initialization if NativeMemoryTracking=detail is set.
  S390_ONLY(if (_heaps == NULL) return false;)
  // It should be ok to call contains without holding a lock.
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

bool CodeCache::contains(nmethod *nm) {
  return contains((void *)nm);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps != NULL && !_heaps->is_empty()) {
    FOR_ALL_HEAPS(heap) {
      CodeBlob* result = (*heap)->find_blob_unsafe(start);
      if (result != NULL) {
        return result;
      }
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_NMETHOD_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next()) {
    f(iter.method());
  }
}

void CodeCache::metadata_do(void f(Metadata* m)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->metadata_do(f);
  }
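  // AOT-compiled methods are not nmethods and are not visited by the
  // iterator above, so their metadata is walked separately via the AOT loader.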
  AOTLoader::metadata_do(f);
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_NMETHOD_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);
#ifdef ASSERT
        if (cb->is_nmethod())
          ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  const bool fix_relocations = f->fix_relocations();
  debug_only(mark_scavenge_root_nmethods());

// ...

#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while(iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      CompiledMethod *nm = cb->as_compiled_method_or_null();
      if (nm != NULL) {
        count += nm->verify_icholder_relocations();
      }
    }
  }
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
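  // In product builds the loop below runs only when the cache needs cleaning
  // (NOT_DEBUG guard). Debug builds always iterate so the verify calls run,
  // and the DEBUG_ONLY guard restricts only the inline cache cleanup itself.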
  NOT_DEBUG(if (needs_cache_clean())) {
    CompiledMethodIterator iter;
    while(iter.next_alive()) {
      CompiledMethod* cm = iter.method();
      assert(!cm->is_unloaded(), "Tautology");
      DEBUG_ONLY(if (needs_cache_clean())) {
        cm->cleanup_inline_caches();
      }
      DEBUG_ONLY(cm->verify());
      DEBUG_ONLY(cm->verify_oop_relocations());

// ...

int CodeCache::blob_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->blob_count() : 0;
}

int CodeCache::blob_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->blob_count();
  }
  return count;
}

int CodeCache::nmethod_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->nmethod_count() : 0;
}

int CodeCache::nmethod_count() {
  int count = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    count += (*heap)->nmethod_count();
  }
  return count;
}

int CodeCache::adapter_count(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->adapter_count() : 0;
}

int CodeCache::adapter_count() {
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    count += (*heap)->adapter_count();
  }
  return count;
}

address CodeCache::low_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->low_boundary() : NULL;
}

address CodeCache::high_bound(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? (address)heap->high_boundary() : NULL;
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert (max_capacity >= unallocated_capacity, "Must be");
  assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result);
  return result;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

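// Forward declaration; icache_init sets up the ICache flush mechanism that
// os::register_code_area below relies on.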
void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment that caused a failure.
  // Instead, round the expansion size up to the page size; in particular,
  // Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
  // Load AOT libraries and add AOT code heaps.
  AOTLoader::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

void CodeCache::cleanup_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while(iter.next_alive()) {

// ...

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

CompiledMethod* CodeCache::find_compiled(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_compiled(), "did not find a compiled method");
  return (CompiledMethod*)cb;
}

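// With AOT code loaded, the overall code address range can exceed the reach
// of near (pc-relative) calls, so a target is conservatively treated as far
// if it is far from either bound of the code cache.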
bool CodeCache::is_far_target(address target) {
#if INCLUDE_AOT
  return NativeCall::is_far_call(_low_bound,  target) ||
         NativeCall::is_far_call(_high_bound, target);
#else
  return false;
#endif
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    CompiledMethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  CompiledMethodIterator iter;
  while(iter.next_alive()) {
    CompiledMethod* nm = iter.method();

// ...

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void CodeCache::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (number_of_nmethods_with_dependencies() == 0 && !UseAOT) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_evol_deoptimization(ev_k_h) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant

// ...

  heap->report_full();

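  // Record a CodeCacheFull event describing the state of the exhausted heap.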
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(heap->blob_count());
    event.set_methodCount(heap->nmethod_count());
    event.set_adaptorCount(heap->adapter_count());
    event.set_unallocatedCapacity(heap->unallocated_capacity());
    event.set_fullCount(heap->full_count());
    event.commit();
  }
}

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
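      // Each CodeBlob is preceded by its HeapBlock header; back up one
      // HeapBlock to read the segment-granular size reserved for the blob.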
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
// ...
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_NMETHOD_HEAPS(heap) {
    if ((_nmethod_heaps->length() >= 1) && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if(nm->is_alive()) { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if(nm->is_alive()) { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }

// ...

      tty->fill_to(40);
      tty->print_cr("%d",buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_NMETHOD_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_NMETHOD_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          ImmutableOopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->count();
            map_size           += set->nr_of_bytes();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT

// ...

    char *method_name = nm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 nm->compile_id(), nm->comp_level(), method_name, (intptr_t)nm->header_begin(),
                 (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            blob_count(), nmethod_count(), adapter_count(),
            unallocated_capacity());
}