128 scopes_metadata_size += nm->metadata_size();
129 scopes_data_size += nm->scopes_data_size();
130 scopes_pcs_size += nm->scopes_pcs_size();
131 } else {
132 code_size += cb->code_size();
133 }
134 }
135 };
136
// Iterate over all CodeHeaps.
// Each macro declares 'heap' as a GrowableArrayIterator<CodeHeap*> over one
// of the CodeHeap subset arrays defined below (_heaps, _nmethod_heaps,
// _allocable_heaps).
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap, walking from
// first_blob(heap) via next_blob(heap, cb) until NULL.
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
144
// Bounds of the code cache address range.
address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
// Head of the singly-linked list of nmethods that may contain scavengable
// oops (linked via nmethod::scavenge_root_link()).
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
// Head of the list of ExceptionCaches that have been unlinked and are
// awaiting deletion in purge_exception_caches(); pushed with a CAS loop.
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
156
157 void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
158 size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
159 // Prepare error message
160 const char* error = "Invalid code heap sizes";
161 err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
162 " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
163 non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);
164
165 if (total_size > cache_size) {
166 // Some code heap sizes were explicitly set: total_size must be <= cache_size
167 message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
168 vm_exit_during_initialization(error, message);
694 iter.method()->do_unloading(unloading_occurred);
695 }
696 }
697
698 void CodeCache::blobs_do(CodeBlobClosure* f) {
699 assert_locked_or_safepoint(CodeCache_lock);
700 FOR_ALL_ALLOCABLE_HEAPS(heap) {
701 FOR_ALL_BLOBS(cb, *heap) {
702 if (cb->is_alive()) {
703 f->do_code_blob(cb);
704 #ifdef ASSERT
705 if (cb->is_nmethod()) {
706 Universe::heap()->verify_nmethod((nmethod*)cb);
707 }
708 #endif //ASSERT
709 }
710 }
711 }
712 }
713
// Walk the list of methods which might contain oops to the java heap.
// Applies f to each live nmethod on the scavenge-root list; when the closure
// requests it (fix_relocations), entries that are dead or no longer contain
// scavengable oops are unlinked during the same walk.
void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  const bool fix_relocations = f->fix_relocations();
  // Debug-only: mark every nmethod that claims list membership so stray
  // marks can be detected by verify_perm_nmethods() at the end.
  debug_only(mark_scavenge_root_nmethods());

  nmethod* prev = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    // Clear the debug mark for each entry actually found on the list.
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
    LogTarget(Trace, gc, nmethod) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      CompileTask::print(&ls, cur,
        is_live ? "scavenge root " : "dead scavenge root", /*short_form:*/ true);
    }
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
    // Capture the successor before a possible unlink below.
    nmethod* const next = cur->scavenge_root_link();
    // The scavengable nmethod list must contain all methods with scavengable
    // oops. It is safe to include more nmethod on the list, but we do not
    // expect any live non-scavengable nmethods on the list.
    if (fix_relocations) {
      if (!is_live || !cur->detect_scavenge_root_oops()) {
        // Dead, or no scavengable oops remain: drop it from the list.
        unlink_scavenge_root_nmethod(cur, prev);
      } else {
        // Kept: this entry is the predecessor for the next iteration.
        prev = cur;
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}
756
757 void CodeCache::register_scavenge_root_nmethod(nmethod* nm) {
758 assert_locked_or_safepoint(CodeCache_lock);
759 if (!nm->on_scavenge_root_list() && nm->detect_scavenge_root_oops()) {
760 add_scavenge_root_nmethod(nm);
761 }
762 }
763
// Delegate verification of an nmethod's scavengable oops to the nmethod.
void CodeCache::verify_scavenge_root_nmethod(nmethod* nm) {
  nm->verify_scavenge_root_oops();
}
767
// Push nm onto the head of the scavenge-root list: flag it as listed,
// link it to the old head, then install it as the new head.
void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  nm->set_on_scavenge_root_list();
  // Link to the current head before publishing nm as the new head.
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}
776
// Splice nm out of the scavenge-root list. 'prev' is nm's predecessor on
// the list, or NULL when nm is the current head.
void CodeCache::unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev) {
  assert_locked_or_safepoint(CodeCache_lock);

  assert((prev == NULL && scavenge_root_nmethods() == nm) ||
         (prev != NULL && prev->scavenge_root_link() == nm), "precondition");

  print_trace("unlink_scavenge_root", nm);
  if (prev == NULL) {
    // nm was the head: the list now starts at its successor.
    set_scavenge_root_nmethods(nm->scavenge_root_link());
  } else {
    // Point the predecessor at nm's successor.
    prev->set_scavenge_root_link(nm->scavenge_root_link());
  }
  // Reset nm's own list state.
  nm->set_scavenge_root_link(NULL);
  nm->clear_on_scavenge_root_list();
}
792
793 void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
794 assert_locked_or_safepoint(CodeCache_lock);
795
796 print_trace("drop_scavenge_root", nm);
797 nmethod* prev = NULL;
798 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
799 if (cur == nm) {
800 unlink_scavenge_root_nmethod(cur, prev);
801 return;
802 }
803 prev = cur;
804 }
805 assert(false, "should have been on list");
806 }
807
// Walk the scavenge-root list and unlink entries that are zombie, unloaded,
// or no longer contain scavengable oops.
void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  // Debug-only: mark claimed list members; verify_perm_nmethods() below
  // asserts that no marks survive this walk.
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    // Capture the successor before a possible unlink below.
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      unlink_scavenge_root_nmethod(cur, last);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}
836
837 #ifndef PRODUCT
// Run f on all nmethods not on the scavenge-root list, verifying the
// integrity of the list in the process.
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    // Undo the temporary mark set by mark_scavenge_root_nmethods() above.
    cur->clear_scavenge_root_marked();
  }
  // Any nmethod still marked claims list membership but was not found on the
  // list; verify_perm_nmethods() asserts on such strays and runs f on the
  // unlisted nmethods.
  verify_perm_nmethods(f);
}
847
848 // Temporarily mark nmethods that are claimed to be on the scavenge list.
849 void CodeCache::mark_scavenge_root_nmethods() {
850 NMethodIterator iter(NMethodIterator::only_alive);
851 while(iter.next()) {
852 nmethod* nm = iter.method();
853 assert(nm->scavenge_root_not_marked(), "clean state");
854 if (nm->on_scavenge_root_list())
855 nm->set_scavenge_root_marked();
856 }
857 }
858
// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods is gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter(NMethodIterator::only_alive);
  while(iter.next()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    // A surviving mark means mark_scavenge_root_nmethods() saw a claimed
    // list member that the subsequent list walk never visited.
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list())
      call_f = false; // don't show this one to the client
    Universe::heap()->verify_nmethod(nm);
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
873 #endif //PRODUCT
874
875 void CodeCache::verify_clean_inline_caches() {
876 #ifdef ASSERT
877 NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
878 while(iter.next()) {
879 nmethod* nm = iter.method();
880 assert(!nm->is_unloaded(), "Tautology");
881 nm->verify_clean_inline_caches();
882 nm->verify();
883 }
884 #endif
885 }
886
887 void CodeCache::verify_icholder_relocations() {
888 #ifdef ASSERT
889 // make sure that we aren't leaking icholders
890 int count = 0;
891 FOR_ALL_HEAPS(heap) {
892 FOR_ALL_BLOBS(cb, *heap) {
893 CompiledMethod *nm = cb->as_compiled_method_or_null();
894 if (nm != NULL) {
910 for (;;) {
911 ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
912 entry->set_purge_list_next(purge_list_head);
913 if (Atomic::cmpxchg(entry, &_exception_cache_purge_list, purge_list_head) == purge_list_head) {
914 break;
915 }
916 }
917 }
918 }
919
920 // Delete exception caches that have been concurrently unlinked,
921 // followed by a global handshake operation.
922 void CodeCache::purge_exception_caches() {
923 ExceptionCache* curr = _exception_cache_purge_list;
924 while (curr != NULL) {
925 ExceptionCache* next = curr->purge_list_next();
926 delete curr;
927 curr = next;
928 }
929 _exception_cache_purge_list = NULL;
930 }
931
void CodeCache::gc_prologue() { } // Nothing for the code cache to do before GC.
933
void CodeCache::gc_epilogue() {
  // After GC, drop list entries that no longer hold scavengable oops.
  prune_scavenge_root_nmethods();
}
937
// Tag for the current unloading cycle; toggles between 1 and 2
// (see increment_unloading_cycle()).
uint8_t CodeCache::_unloading_cycle = 1;
939
940 void CodeCache::increment_unloading_cycle() {
941 if (_unloading_cycle == 1) {
942 _unloading_cycle = 2;
943 } else {
944 _unloading_cycle = 1;
945 }
946 }
947
// Establishes the scope in which code unloading runs: installs the
// is_alive-based unloading behaviour, advances the unloading cycle tag,
// and notifies dependency contexts that cleaning has started.
CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
  : _is_unloading_behaviour(is_alive)
{
  IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
  increment_unloading_cycle();
  DependencyContext::cleaning_start();
}
955
|
128 scopes_metadata_size += nm->metadata_size();
129 scopes_data_size += nm->scopes_data_size();
130 scopes_pcs_size += nm->scopes_pcs_size();
131 } else {
132 code_size += cb->code_size();
133 }
134 }
135 };
136
// Iterate over all CodeHeaps.
// Each macro declares 'heap' as a GrowableArrayIterator<CodeHeap*> over one
// of the CodeHeap subset arrays defined below (_heaps, _nmethod_heaps,
// _allocable_heaps).
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap, walking from
// first_blob(heap) via next_blob(heap, cb) until NULL.
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
144
// Bounds of the code cache address range.
address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
// Head of the list of ExceptionCaches that have been unlinked and are
// awaiting deletion in purge_exception_caches(); pushed with a CAS loop.
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
155
156 void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
157 size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
158 // Prepare error message
159 const char* error = "Invalid code heap sizes";
160 err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
161 " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
162 non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);
163
164 if (total_size > cache_size) {
165 // Some code heap sizes were explicitly set: total_size must be <= cache_size
166 message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
167 vm_exit_during_initialization(error, message);
693 iter.method()->do_unloading(unloading_occurred);
694 }
695 }
696
697 void CodeCache::blobs_do(CodeBlobClosure* f) {
698 assert_locked_or_safepoint(CodeCache_lock);
699 FOR_ALL_ALLOCABLE_HEAPS(heap) {
700 FOR_ALL_BLOBS(cb, *heap) {
701 if (cb->is_alive()) {
702 f->do_code_blob(cb);
703 #ifdef ASSERT
704 if (cb->is_nmethod()) {
705 Universe::heap()->verify_nmethod((nmethod*)cb);
706 }
707 #endif //ASSERT
708 }
709 }
710 }
711 }
712
713 void CodeCache::verify_clean_inline_caches() {
714 #ifdef ASSERT
715 NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading);
716 while(iter.next()) {
717 nmethod* nm = iter.method();
718 assert(!nm->is_unloaded(), "Tautology");
719 nm->verify_clean_inline_caches();
720 nm->verify();
721 }
722 #endif
723 }
724
725 void CodeCache::verify_icholder_relocations() {
726 #ifdef ASSERT
727 // make sure that we aren't leaking icholders
728 int count = 0;
729 FOR_ALL_HEAPS(heap) {
730 FOR_ALL_BLOBS(cb, *heap) {
731 CompiledMethod *nm = cb->as_compiled_method_or_null();
732 if (nm != NULL) {
748 for (;;) {
749 ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
750 entry->set_purge_list_next(purge_list_head);
751 if (Atomic::cmpxchg(entry, &_exception_cache_purge_list, purge_list_head) == purge_list_head) {
752 break;
753 }
754 }
755 }
756 }
757
758 // Delete exception caches that have been concurrently unlinked,
759 // followed by a global handshake operation.
760 void CodeCache::purge_exception_caches() {
761 ExceptionCache* curr = _exception_cache_purge_list;
762 while (curr != NULL) {
763 ExceptionCache* next = curr->purge_list_next();
764 delete curr;
765 curr = next;
766 }
767 _exception_cache_purge_list = NULL;
768 }
769
// Tag for the current unloading cycle; toggles between 1 and 2
// (see increment_unloading_cycle()).
uint8_t CodeCache::_unloading_cycle = 1;
771
772 void CodeCache::increment_unloading_cycle() {
773 if (_unloading_cycle == 1) {
774 _unloading_cycle = 2;
775 } else {
776 _unloading_cycle = 1;
777 }
778 }
779
// Establishes the scope in which code unloading runs: installs the
// is_alive-based unloading behaviour, advances the unloading cycle tag,
// and notifies dependency contexts that cleaning has started.
CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive)
  : _is_unloading_behaviour(is_alive)
{
  IsUnloadingBehaviour::set_current(&_is_unloading_behaviour);
  increment_unloading_cycle();
  DependencyContext::cleaning_start();
}
787
|