#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"

// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count            = 0;
    total_size       = 0;
    header_size      = 0;
    code_size        = 0;
    stub_size        = 0;
    relocation_size  = 0;
    scopes_oop_size  = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size  = 0;
  }

  int total()     { return total_size + header_size + relocation_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count, title, (int)(total() / K),
                  header_size * 100 / total_size, relocation_size * 100 / total_size,
                  code_size * 100 / total_size, stub_size * 100 / total_size,
                  scopes_oop_size * 100 / total_size, scopes_metadata_size * 100 / total_size,
                  scopes_data_size * 100 / total_size, scopes_pcs_size * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size += cb->size();
    header_size += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};
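
// Usage sketch (illustrative only): accumulate blob statistics while walking the cache,
// e.g. CodeBlob_sizes sizes; FOR_ALL_BLOBS(p) { sizes.add(p); } sizes.print("all");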

// CodeCache implementation

CodeHeap* CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

int CodeCache::_codemem_full_count = 0;

CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}


CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}


CodeBlob* CodeCache::alive(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}


nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

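// High-water mark of code cache usage; reported as "max_used" by print_summary().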
static size_t maxCodeCacheUsed = 0;

CodeBlob* CodeCache::allocate(int size, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass he is busy
  // instantiating.
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "allocation request must be reasonable");
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;
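  // Keep retrying the allocation, growing the heap by CodeCacheExpansionSize each
  // time, until it succeeds or the reserved code cache cannot be expanded further.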
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
                    (address)_heap->high() - (address)_heap->low_boundary());
    }
  }
  maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
                          (address)_heap->low_boundary()) - unallocated_capacity());
  print_trace("allocation", cb, size);
  _number_of_blobs++;
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  assert(_number_of_blobs >= 0, "sanity check");
}


void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}


// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var)          for (CodeBlob* var = first();               var != NULL; var = next(var))
#define FOR_ALL_ALIVE_BLOBS(var)    for (CodeBlob* var = alive(first());        var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod* var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
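
// Example (illustrative only): count the live nmethods in the cache.
//   int live = 0;
//   FOR_ALL_ALIVE_NMETHODS(nm) { live++; }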


bool CodeCache::contains(void* p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}


// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}


void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    f(nm);
  }
}

int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      // ...

  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false; // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false; // not an nmethod
    }
    if (call_f) f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      nm->verify_clean_inline_caches();
      nm->verify();
    }
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      count += nm->verify_icholder_relocations();
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      DEBUG_ONLY(nm->verify_oop_relocations());
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}


address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->low_boundary();
}


address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->high();
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio() {
  double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)CodeCache::max_capacity();
  return max_capacity / unallocated_capacity;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure; instead,
  // round the code cache sizes up to the page size. In particular, Solaris is
  // moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}


void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Search the hierarchy looking for nmethods which are affected by the loading of this class.

  // Then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen.
  No_Safepoint_Verifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    // ...
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->mark_for_deoptimization();
  }
}


int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

void CodeCache::report_codemem_full() {
  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_startAddress((u8)low_bound());
    event.set_commitedTopAddress((u8)high());
    event.set_reservedTopAddress((u8)high_bound());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  CodeBlob* cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
    wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelist_length());
  tty->print_cr("Allocated in freelist:          " SSIZE_FORMAT "kB", bytes_allocated_in_freelist()/K);
  tty->print_cr("Unused bytes in CodeBlobs:      " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size:               " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  CodeBlob* cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char* method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if (nm->is_alive())       { tty->print_cr(" alive"); }
        if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if (nm->is_zombie())      { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if (nm->is_alive())       { nmethodAlive++; }
      if (nm->is_not_entrant()) { nmethodNotEntrant++; }
      if (nm->is_zombie())      { nmethodZombie++; }
      if (nm->is_unloaded())    { nmethodUnloaded++; }
      if (nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

      if (nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        max_nm_size = MAX2(max_nm_size, nm->size());
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int* buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_java_method()) {
        buckets[nm->size() / bucketSize]++;
      }
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_BLOBS(p) {
    if (!p->is_alive()) {
      dead.add(p);
    } else {
      live.add(p);
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }


  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->code_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size           += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  size_t total = (_heap->high_boundary() - _heap->low_boundary());
  st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
               total/K, (total - unallocated_capacity())/K,
               maxCodeCacheUsed/K, unallocated_capacity()/K);

  if (detailed) {
    st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                 p2i(_heap->low_boundary()),
                 p2i(_heap->high()),
                 p2i(_heap->high_boundary()));
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 nof_blobs(), nof_nmethods(), nof_adapters());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity());
}


//------------------------------------------------------------------------------------------------

#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count            = 0;
    total_size       = 0;
    header_size      = 0;
    code_size        = 0;
    stub_size        = 0;
    relocation_size  = 0;
    scopes_oop_size  = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size  = 0;
  }

  int total()     { return total_size + header_size + relocation_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count, title, (int)(total() / K),
                  header_size * 100 / total_size, relocation_size * 100 / total_size,
                  code_size * 100 / total_size, stub_size * 100 / total_size,
                  scopes_oop_size * 100 / total_size, scopes_metadata_size * 100 / total_size,
                  scopes_data_size * 100 / total_size, scopes_pcs_size * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size += cb->size();
    header_size += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;

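// Note: the heap list lives on the C heap (mtCode) rather than in a ResourceArea,
// since the code cache must persist for the lifetime of the VM.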
// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (3, true);

void CodeCache::initialize_heaps() {
  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Calculate default CodeHeap sizes if not set by user
  if (!FLAG_IS_CMDLINE(NonMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
      && !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
    // Increase default NonMethodCodeHeapSize to account for compiler buffers
    FLAG_SET_ERGO(uintx, NonMethodCodeHeapSize, NonMethodCodeHeapSize + code_buffers_size);

    // Check if we have enough space for the non-method code heap
    if (ReservedCodeCacheSize > NonMethodCodeHeapSize) {
      // Use the default value for NonMethodCodeHeapSize and one half of the
      // remaining size for non-profiled methods and one half for profiled methods
      size_t remaining_size = ReservedCodeCacheSize - NonMethodCodeHeapSize;
      size_t profiled_size = remaining_size / 2;
      size_t non_profiled_size = remaining_size - profiled_size;
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
    } else {
      // Use all space for the non-method heap and set other heaps to minimal size
      FLAG_SET_ERGO(uintx, NonMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-method CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    FLAG_SET_ERGO(uintx, NonMethodCodeHeapSize, NonMethodCodeHeapSize + NonProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
  }

  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
  if (NonMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization("Not enough space in non-method code heap to run VM.");
  }
  guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");

  // Align reserved sizes of CodeHeaps
  size_t non_method_size   = ReservedCodeSpace::allocation_align_size_up(NonMethodCodeHeapSize);
  size_t profiled_size     = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
  size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);

  // Compute initial sizes of CodeHeaps
  size_t init_non_method_size   = MIN2(InitialCodeCacheSize, non_method_size);
  size_t init_profiled_size     = MIN2(InitialCodeCacheSize, profiled_size);
  size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-methods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(non_profiled_size + profiled_size + non_method_size);
  ReservedSpace non_method_space = rs.first_part(non_method_size);
  ReservedSpace rest = rs.last_part(non_method_size);
  ReservedSpace profiled_space = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-methods (stubs, adapters, ...)
  add_heap(non_method_space, "non-methods", init_non_method_size, CodeBlobType::NonMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "profiled nmethods", init_profiled_size, CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "non-profiled nmethods", init_non_profiled_size, CodeBlobType::MethodNonProfiled);
}
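
// Worked example (numbers illustrative only): with ReservedCodeCacheSize = 240M and a
// NonMethodCodeHeapSize that ergonomics has grown to 8M, the remaining 232M is split
// evenly, giving ProfiledCodeHeapSize = 116M and NonProfiledCodeHeapSize = 116M.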

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
          os::page_size_for_region(InitialCodeCacheSize, size, 8) :
          os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);

  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();

  return rs;
}

bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::mode() == Arguments::_int) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonMethod);
  } else if (TieredCompilation || code_blob_type == CodeBlobType::NonMethod) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-profiled code heap
    return (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}
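
// In short: without SegmentedCodeCache there is a single 'All' heap; with it, the
// non-method heap always exists, interpreter-only mode keeps just that heap, and the
// profiled heap is used only when TieredCompilation is enabled.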

void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  _heaps->append(heap);

  // Reserve space
  size_initial = round_to(size_initial, os::vm_page_size());

  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap(CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}
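// Note: this is a linear scan over the (small, fixed) set of code heaps; the blob
// must actually be in the cache, otherwise ShouldNotReachHere() fires.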

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
  return next_blob(get_code_heap(cb), cb);
}

CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass he is busy
  // instantiating.
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "allocation request must be reasonable");
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(SegmentedCodeCache ? code_blob_type : CodeBlobType::All);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonMethod)) {
        // Fallback solution: store non-method code in the non-profiled code heap
        return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
      }
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (SegmentedCodeCache) {
        tty->print("Code heap '%s'", heap->name());
      } else {
        tty->print("Code cache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  _number_of_blobs++;
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(_number_of_blobs >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void* p) {
  // It should be ok to call contains without holding a lock
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps == NULL || _heaps->is_empty()) return NULL;

  FOR_ALL_HEAPS(heap) {
    CodeBlob* result = (CodeBlob*) (*heap)->find_start(start);
    if (result != NULL && result->blob_contains((address)start)) {
      return result;
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

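// The NMethodIterator used below (assumed to come from the code cache headers) walks
// every code heap but yields only nmethods; next() visits all of them, while
// next_alive() additionally skips nmethods that are no longer alive.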
void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next()) {
    f(iter.method());
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    f(iter.method());
  }
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);

#ifdef ASSERT
        if (cb->is_nmethod())
          ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      // ...

  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list())
      nm->set_scavenge_root_marked();
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list())
      call_f = false; // don't show this one to the client
    nm->verify_scavenge_root_oops();
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;
        count += nm->verify_icholder_relocations();
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    if (needs_cache_clean()) {
      nm->cleanup_inline_caches();
    }
    DEBUG_ONLY(nm->verify());
    DEBUG_ONLY(nm->verify_oop_relocations());
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns true if a CodeHeap is full and sets code_blob_type accordingly.
 */
bool CodeCache::is_full(int* code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
      *code_blob_type = (*heap)->code_blob_type();
      return true;
    }
  }
  return false;
}
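
// Typical use (illustrative only):
//   int full_type;
//   if (CodeCache::is_full(&full_type)) {
//     CodeCache::report_codemem_full(full_type, false);
//   }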

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }
  double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)heap->max_capacity();
  return max_capacity / unallocated_capacity;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, causing failure; instead,
  // round the code cache size up to the page size. In particular, Solaris is
  // moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "Code heap", InitialCodeCacheSize, CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Search the hierarchy looking for nmethods which are affected by the loading of this class.

  // Then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen.
  No_Safepoint_Verifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    // ...
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->mark_for_deoptimization();
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(SegmentedCodeCache ? code_blob_type : CodeBlobType::All);

  if (!heap->was_full() || print) {
    // Not yet reported for this heap, report
    heap->report_full();
    if (SegmentedCodeCache) {
      warning("CodeHeap for %s is full. Compiler has been disabled.", CodeCache::get_code_heap_name(code_blob_type));
      warning("Try increasing the code heap size using -XX:%s=",
              (code_blob_type == CodeBlobType::MethodNonProfiled) ? "NonProfiledCodeHeapSize" : "ProfiledCodeHeapSize");
    } else {
      warning("CodeCache is full. Compiler has been disabled.");
      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
    }
    ResourceMark rm;
    stringStream s;
    // Dump the code cache into a buffer before locking the tty.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    ttyLocker ttyl;
    tty->print("%s", s.as_string());
  }

  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}
1035
1036 void CodeCache::print_memory_overhead() {
1037 size_t wasted_bytes = 0;
1038 FOR_ALL_HEAPS(heap) {
1039 CodeHeap* curr_heap = *heap;
1040 for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
1041 HeapBlock* heap_block = ((HeapBlock*)cb) - 1; // the HeapBlock header precedes each blob
1042 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size(); // length() is in segments
1043 }
1044 }
1045 // Print freelist statistics and the per-blob overhead computed above
1046 ttyLocker ttl;
1047 tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
1048 tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
1049 tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
1050 tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
1051 }
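// Worked example with illustrative numbers: assuming CodeCacheSegmentSize is
// 64 bytes, a blob with cb->size() == 1000 whose heap block spans 17 segments
// contributes 17 * 64 - 1000 = 88 wasted bytes; the count includes the
// HeapBlock header, since length() covers it as well.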
1052
1053 //------------------------------------------------------------------------------------------------
1054 // Non-product version
1055
1056 #ifndef PRODUCT
1057
1058 void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
1059 if (PrintCodeCache2) { // Need to add a new flag
1060 ResourceMark rm;
1061 if (size == 0) size = cb->size();
1062 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
1063 }
1064 }
1065
1066 void CodeCache::print_internals() {
1067 int nmethodCount = 0;
1068 int runtimeStubCount = 0;
1069 int adapterCount = 0;
1070 int deoptimizationStubCount = 0;
1071 int uncommonTrapStubCount = 0;
1072 int bufferBlobCount = 0;
1073 int total = 0;
1074 int nmethodAlive = 0;
1075 int nmethodNotEntrant = 0;
1076 int nmethodZombie = 0;
1077 int nmethodUnloaded = 0;
1078 int nmethodJava = 0;
1079 int nmethodNative = 0;
1080 int max_nm_size = 0;
1081 ResourceMark rm;
1082
1083 int i = 0;
1084 FOR_ALL_HEAPS(heap) {
1085 if (SegmentedCodeCache && Verbose) {
1086 tty->print_cr("-- Code heap '%s' --", (*heap)->name());
1087 }
1088 FOR_ALL_BLOBS(cb, *heap) {
1089 total++;
1090 if (cb->is_nmethod()) {
1091 nmethod* nm = (nmethod*)cb;
1092
1093 if (Verbose && nm->method() != NULL) {
1094 ResourceMark rm;
1095 char *method_name = nm->method()->name_and_sig_as_C_string();
1096 tty->print("%s", method_name);
1097 if(nm->is_alive()) { tty->print_cr(" alive"); }
1098 if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
1099 if(nm->is_zombie()) { tty->print_cr(" zombie"); }
1100 }
1101
1102 nmethodCount++;
1103
1104 if(nm->is_alive()) { nmethodAlive++; }
1105 if(nm->is_not_entrant()) { nmethodNotEntrant++; }
1106 if(nm->is_zombie()) { nmethodZombie++; }
1107 if(nm->is_unloaded()) { nmethodUnloaded++; }
1108 if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }
1109
1110 if(nm->method() != NULL && nm->is_java_method()) {
1111 nmethodJava++;
1112 max_nm_size = MAX2(max_nm_size, nm->size());
1113 }
1114 } else if (cb->is_runtime_stub()) {
1115 runtimeStubCount++;
1116 } else if (cb->is_deoptimization_stub()) {
1117 deoptimizationStubCount++;
1118 } else if (cb->is_uncommon_trap_stub()) {
1119 uncommonTrapStubCount++;
1120 } else if (cb->is_adapter_blob()) {
1121 adapterCount++;
1122 } else if (cb->is_buffer_blob()) {
1123 bufferBlobCount++;
1124 }
1125 }
1126 }
1127
1128 int bucketSize = 512;
1129 int bucketLimit = max_nm_size / bucketSize + 1;
1130 int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1131 memset(buckets, 0, sizeof(int) * bucketLimit);
1132
1133 NMethodIterator iter;
1134 while(iter.next()) {
1135 nmethod* nm = iter.method();
1136 if(nm->method() != NULL && nm->is_java_method()) {
1137 buckets[nm->size() / bucketSize]++;
1138 }
1139 }
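// Worked example (illustrative): with bucketSize == 512, an nmethod whose
// size() is 1300 bytes increments buckets[1300 / 512] == buckets[2], which is
// reported as the "1024 - 1536 bytes" row by the loop below.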
1140
1141 tty->print_cr("Code Cache Entries (total of %d)",total);
1142 tty->print_cr("-------------------------------------------------");
1143 tty->print_cr("nmethods: %d",nmethodCount);
1144 tty->print_cr("\talive: %d",nmethodAlive);
1145 tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
1146 tty->print_cr("\tzombie: %d",nmethodZombie);
1147 tty->print_cr("\tunloaded: %d",nmethodUnloaded);
1148 tty->print_cr("\tjava: %d",nmethodJava);
1149 tty->print_cr("\tnative: %d",nmethodNative);
1150 tty->print_cr("runtime_stubs: %d",runtimeStubCount);
1151 tty->print_cr("adapters: %d",adapterCount);
1152 tty->print_cr("buffer blobs: %d",bufferBlobCount);
1153 tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
1154 tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
1155 tty->print_cr("\nnmethod size distribution (non-zombie java)");
1156 tty->print_cr("-------------------------------------------------");
1157
1158 for(int i=0; i<bucketLimit; i++) {
1159 if(buckets[i] != 0) {
1160 tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
1161 tty->fill_to(40);
1162 tty->print_cr("%d",buckets[i]);
1163 }
1164 }
1165
1166 FREE_C_HEAP_ARRAY(int, buckets, mtCode);
1167 print_memory_overhead();
1168 }
1169
1170 #endif // !PRODUCT
1171
1172 void CodeCache::print() {
1173 print_summary(tty);
1174
1175 #ifndef PRODUCT
1176 if (!Verbose) return;
1177
1178 CodeBlob_sizes live;
1179 CodeBlob_sizes dead;
1180
1181 FOR_ALL_HEAPS(heap) {
1182 FOR_ALL_BLOBS(cb, *heap) {
1183 if (!cb->is_alive()) {
1184 dead.add(cb);
1185 } else {
1186 live.add(cb);
1187 }
1188 }
1189 }
1190
1191 tty->print_cr("CodeCache:");
1192 tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
1193
1194 if (!live.is_empty()) {
1195 live.print("live");
1196 }
1197 if (!dead.is_empty()) {
1198 dead.print("dead");
1199 }
1200
1201 if (WizardMode) {
1202 // print the oop_map usage
1203 int code_size = 0;
1204 int number_of_blobs = 0;
1205 int number_of_oop_maps = 0;
1206 int map_size = 0;
1207 FOR_ALL_HEAPS(heap) {
1208 FOR_ALL_BLOBS(cb, *heap) {
1209 if (cb->is_alive()) {
1210 number_of_blobs++;
1211 code_size += cb->code_size();
1212 OopMapSet* set = cb->oop_maps();
1213 if (set != NULL) {
1214 number_of_oop_maps += set->size();
1215 map_size += set->heap_size();
1216 }
1217 }
1218 }
1219 }
1220 tty->print_cr("OopMaps");
1221 tty->print_cr(" #blobs = %d", number_of_blobs);
1222 tty->print_cr(" code size = %d", code_size);
1223 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1224 tty->print_cr(" map size = %d", map_size);
1225 }
1226
1227 #endif // !PRODUCT
1228 }
1229
1230 void CodeCache::print_summary(outputStream* st, bool detailed) {
1231 FOR_ALL_HEAPS(heap_iterator) {
1232 CodeHeap* heap = (*heap_iterator);
1233 size_t total = (heap->high_boundary() - heap->low_boundary()); // reserved size of this heap
1234 if (SegmentedCodeCache) {
1235 st->print("CodeHeap '%s':", heap->name());
1236 } else {
1237 st->print("CodeCache:");
1238 }
1239 st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1240 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1241 total/K, (total - heap->unallocated_capacity())/K,
1242 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);
1243
1244 if (detailed) {
1245 st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1246 p2i(heap->low_boundary()),
1247 p2i(heap->high()),
1248 p2i(heap->high_boundary()));
1249 }
1250 }
1251
1252 if (detailed) {
1253 st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
1254 " adapters=" UINT32_FORMAT,
1255 nof_blobs(), nof_nmethods(), nof_adapters());
1256 st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
1257 "enabled" : Arguments::mode() == Arguments::_int ?
1258 "disabled (interpreter mode)" :
1259 "disabled (not enough contiguous free space left)");
1260 }
1261 }
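// Sample of the non-segmented, non-detailed output above (made-up values):
//
//   CodeCache: size=245760Kb used=1366Kb max_used=1943Kb free=244393Kb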
1262
1263 void CodeCache::log_state(outputStream* st) {
1264 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1265 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
1266 nof_blobs(), nof_nmethods(), nof_adapters(),
1267 unallocated_capacity());
1268 }
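// The attributes above are embedded in a compilation-log XML element; an
// illustrative fragment with made-up values:
//
//   total_blobs='1254' nmethods='893' adapters='274' free_code_cache='249471248'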
|