#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"

// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count = 0;
    total_size = 0;
    header_size = 0;
    code_size = 0;
    stub_size = 0;
    relocation_size = 0;
    scopes_oop_size = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size = 0;
  }

  void add(CodeBlob* cb) {
    count++;
    total_size += cb->size();
    header_size += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// CodeCache implementation

CodeHeap* CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

int CodeCache::_codemem_full_count = 0;

CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}


CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}


CodeBlob* CodeCache::alive(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}


nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

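// High-water mark of code cache usage; updated on each allocation and reported by print_summary().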
static size_t maxCodeCacheUsed = 0;

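// Allocate a code blob of the given size. Allocations flagged 'is_critical' may use the
// CodeCacheMinimumFreeSpace reserve that ordinary allocations are required to leave free.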
CodeBlob* CodeCache::allocate(int size, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass he is busy
  // instantiating.
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "allocation request must be reasonable");
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
                    (address)_heap->high() - (address)_heap->low_boundary());
    }
  }
  maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
                          (address)_heap->low_boundary()) - unallocated_capacity());
  print_trace("allocation", cb, size);
  _number_of_blobs++;
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  assert(_number_of_blobs >= 0, "sanity check");
}


void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}


// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var) for (CodeBlob* var = first(); var != NULL; var = next(var))
#define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob* var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod* var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))


bool CodeCache::contains(void* p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}


// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}


void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    f(nm);
  }
}

int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}


// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false; // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false; // not an nmethod
    }
    if (call_f) f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT


void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      nm->fix_oop_relocations();
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");

#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_BLOBS(cb) {
    if (cb->is_nmethod()) {
      RelocIterator iter((nmethod*)cb);
      while (iter.next()) {
        if (iter.type() == relocInfo::virtual_call_type) {
          if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
            CompiledIC* ic = CompiledIC_at(iter.reloc());
            if (TraceCompiledIC) {
              tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
              ic->print();
            }
            assert(ic->cached_icholder() != NULL, "must be non-NULL");
            count++;
          }
        }
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}


void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}


address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->low_boundary();
}


address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->high();
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio() {
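  // CodeCacheMinimumFreeSpace is excluded from the free space because it is kept in
  // reserve for critical allocations and is not usable by ordinary compiled code.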
  double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)CodeCache::max_capacity();
  return max_capacity / unallocated_capacity;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment that failed on mismatch; instead,
  // round the code cache sizes up to the page size. In particular, Solaris is moving
  // to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}


void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}


// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen
  No_Safepoint_Verifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP



// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->mark_for_deoptimization();
  }
}


int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

void CodeCache::report_codemem_full() {
  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_startAddress((u8)low_bound());
    event.set_commitedTopAddress((u8)high());
    event.set_reservedTopAddress((u8)high_bound());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  CodeBlob* cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
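    // Each CodeBlob is preceded by a HeapBlock header; the slack between the
    // segment-rounded block length and the blob's actual size is wasted space.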
    HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
    wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelist_length());
  tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelist()/K);
  tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) { // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  CodeBlob* cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char* method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if (nm->is_alive()) { tty->print_cr(" alive"); }
        if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if (nm->is_zombie()) { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if (nm->is_alive()) { nmethodAlive++; }
      if (nm->is_not_entrant()) { nmethodNotEntrant++; }
      if (nm->is_zombie()) { nmethodZombie++; }
      if (nm->is_unloaded()) { nmethodUnloaded++; }
      if (nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

      if (nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        max_nm_size = MAX2(max_nm_size, nm->size());
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int* buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_java_method()) {
        buckets[nm->size() / bucketSize]++;
      }
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_BLOBS(p) {
    if (!p->is_alive()) {
      dead.add(p);
    } else {
      live.add(p);
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }


  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->code_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  size_t total = (_heap->high_boundary() - _heap->low_boundary());
  st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
               total/K, (total - unallocated_capacity())/K,
               maxCodeCacheUsed/K, unallocated_capacity()/K);

  if (detailed) {
    st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                 p2i(_heap->low_boundary()),
                 p2i(_heap->high()),
                 p2i(_heap->high_boundary()));
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 nof_blobs(), nof_nmethods(), nof_adapters());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity());
}

|
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/compilationPolicy.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#endif
#ifdef COMPILER2
#include "opto/compile.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count = 0;
    total_size = 0;
    header_size = 0;
    code_size = 0;
    stub_size = 0;
    relocation_size = 0;
    scopes_oop_size = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size = 0;
  }

  void add(CodeBlob* cb) {
    count++;
    total_size += cb->size();
    header_size += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(iter) for (GrowableArrayIterator<CodeHeap*> iter = _heaps->begin(); iter != _heaps->end(); ++iter)
// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;

// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*>(3, true);

void CodeCache::initialize_heaps() {
  // Calculate default CodeHeap sizes if not set by user
  if (FLAG_IS_DEFAULT(NonMethodCodeHeapSize) && FLAG_IS_DEFAULT(ProfiledCodeHeapSize)
      && FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) {
    // Determine size of compiler buffers
    int total_buffer_size = 0;

#ifdef COMPILER1
    // C1 temporary code buffers (see Compiler::init_buffer_blob())
    const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
    const int c1_buffer_size = Compilation::desired_max_code_buffer_size() + Compilation::desired_max_constant_size();
    total_buffer_size += c1_count * c1_buffer_size;
#endif

#ifdef COMPILER2
    // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
    const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
    // Initial size of constant table (this may be increased if a compiled method needs more space)
    const int constant_size = (4 * 1024);
    const int c2_buffer_size = Compile::MAX_inst_size + Compile::MAX_locs_size + constant_size;
    total_buffer_size += c2_count * c2_buffer_size;
#endif

    // Increase default NonMethodCodeHeapSize to account for compiler buffers
    FLAG_SET_DEFAULT(NonMethodCodeHeapSize, NonMethodCodeHeapSize + total_buffer_size);

    // Check if we have enough space for the non-method code heap
    if (ReservedCodeCacheSize > NonMethodCodeHeapSize) {
      // Use the default value for NonMethodCodeHeapSize and one half of the
      // remaining size for non-profiled methods and one half for profiled methods
      size_t remaining_size = ReservedCodeCacheSize - NonMethodCodeHeapSize;
      size_t profiled_size = remaining_size / 2;
      size_t non_profiled_size = remaining_size - profiled_size;
      FLAG_SET_DEFAULT(ProfiledCodeHeapSize, profiled_size);
      FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, non_profiled_size);
    } else {
      // Use all space for the non-method heap and set other heaps to minimal size
      FLAG_SET_DEFAULT(NonMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
      FLAG_SET_DEFAULT(ProfiledCodeHeapSize, os::vm_page_size());
      FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, os::vm_page_size());
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
    FLAG_SET_DEFAULT(ProfiledCodeHeapSize, 0);
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-method CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    FLAG_SET_DEFAULT(NonMethodCodeHeapSize, NonMethodCodeHeapSize + NonProfiledCodeHeapSize);
    FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, 0);
  }

  // Size check
  guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");

  // Align reserved sizes of CodeHeaps
  size_t non_method_size = ReservedCodeSpace::allocation_align_size_up(NonMethodCodeHeapSize);
  size_t profiled_size = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
  size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);

  // Compute initial sizes of CodeHeaps
  size_t init_non_method_size = MIN2(InitialCodeCacheSize, non_method_size);
  size_t init_profiled_size = MIN2(InitialCodeCacheSize, profiled_size);
  size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-methods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(non_profiled_size + profiled_size + non_method_size);
  ReservedSpace non_method_space = rs.first_part(non_method_size);
  ReservedSpace rest = rs.last_part(non_method_size);
  ReservedSpace profiled_space = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-methods (stubs, adapters, ...)
  add_heap(non_method_space, "Non-methods", init_non_method_size, CodeBlobType::NonMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "Profiled nmethods", init_profiled_size, CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "Non-profiled nmethods", init_non_profiled_size, CodeBlobType::MethodNonProfiled);
}

ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
      os::page_size_for_region(InitialCodeCacheSize, size, 8) :
      os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
      MAX2(page_size, granularity);

  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();

  return rs;
}

bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: Use a single code heap
    return true;
  } else if (Arguments::mode() == Arguments::_int) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonMethod);
  } else if (TieredCompilation || code_blob_type == CodeBlobType::NonMethod) {
    // Tiered compilation: use all code heaps
    return true;
  } else {
    // No TieredCompilation: we only need the non-profiled code heap
    return (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  _heaps->append(heap);

  // Reserve Space
  size_initial = round_to(size_initial, os::vm_page_size());

  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  if (!SegmentedCodeCache) return _heaps->first();

  FOR_ALL_HEAPS(it) {
    if ((*it)->accepts(code_blob_type)) {
      return (*it);
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  if (heap != NULL) {
    return (CodeBlob*)heap->first();
  }
  return NULL;
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  if (heap != NULL) {
    return (CodeBlob*)heap->next(cb);
  }
  return NULL;
}

CodeBlob* CodeCache::first_alive_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first_blob(heap);
  while (cb != NULL && !cb->is_alive()) {
    cb = next_blob(heap, cb);
  }
  return cb;
}

CodeBlob* CodeCache::next_alive_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next_blob(heap, cb);
  while (cb != NULL && !cb->is_alive()) {
    cb = next_blob(heap, cb);
  }
  return cb;
}

CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass he is busy
  // instantiating.
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, "allocation request must be reasonable");
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "Heap exists");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (SegmentedCodeCache) {
        tty->print("Code heap '%s'", heap->name());
      } else {
        tty->print("Code cache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  _number_of_blobs++;
  return cb;
}

void CodeCache::free(CodeBlob* cb, int code_blob_type) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  // Get heap for given CodeBlobType and deallocate
  get_code_heap(code_blob_type)->deallocate(cb);

  assert(_number_of_blobs >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void* p) {
  // It should be ok to call contains without holding a lock
  FOR_ALL_HEAPS(it) {
    if ((*it)->contains(p)) {
      return true;
    }
  }
  return false;
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps->is_empty()) return NULL;

  FOR_ALL_HEAPS(it) {
    CodeBlob* result = (CodeBlob*)(*it)->find_start(start);
    if (result != NULL && result->blob_contains((address)start)) {
      return result;
    }
  }
  return NULL;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(it) {
    FOR_ALL_BLOBS(cb, *it) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
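  // NMethodIterator (used here and in the functions below) visits every nmethod in all
  // code heaps; next() advances the iterator and method() returns the current nmethod.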
  NMethodIterator iter;
  while (iter.next()) {
    f(iter.method());
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->is_alive()) {
      f(nm);
    }
  }
}

int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->is_alive()) {
      nm->do_unloading(is_alive, unloading_occurred);
    }
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(it) {
    FOR_ALL_BLOBS(cb, *it) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);

#ifdef ASSERT
        if (cb->is_nmethod())
          ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->is_alive()) {
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->is_alive()) {
      bool call_f = (f_or_null != NULL);
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false; // don't show this one to the client
      nm->verify_scavenge_root_oops();
      if (call_f) f_or_null->do_code_blob(nm);
    }
  }
}
#endif //PRODUCT

void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->is_alive()) {
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      nm->fix_oop_relocations();
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");

#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;

  iter = NMethodIterator();
  while (iter.next()) {
    RelocIterator reloc_iter(iter.method());
    while (reloc_iter.next()) {
      if (reloc_iter.type() == relocInfo::virtual_call_type) {
        if (CompiledIC::is_icholder_call_site(reloc_iter.virtual_call_reloc())) {
          CompiledIC* ic = CompiledIC_at(reloc_iter.reloc());
          if (TraceCompiledIC) {
            tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
            ic->print();
          }
          assert(ic->cached_icholder() != NULL, "must be non-NULL");
          count++;
        }
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->is_alive()) {
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_HEAPS(it) {
    cap += (*it)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_HEAPS(it) {
    unallocated_cap += (*it)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_HEAPS(it) {
    max_cap += (*it)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns true if a CodeHeap is full and sets code_blob_type accordingly.
 */
bool CodeCache::is_full(int* code_blob_type) {
  FOR_ALL_HEAPS(it) {
    if ((*it)->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
      *code_blob_type = (*it)->code_blob_type();
      return true;
    }
  }
  return false;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }
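  // CodeCacheMinimumFreeSpace is excluded from the free space because it is kept in
  // reserve for critical allocations and is not usable by ordinary compiled code.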
  double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)heap->max_capacity();
  return max_capacity / unallocated_capacity;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_HEAPS(it) {
    allocated_bytes += (*it)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_HEAPS(it) {
    number_of_segments += (*it)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_HEAPS(it) {
    length += (*it)->freelist_length();
  }
  return length;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment that failed on mismatch; instead,
  // round the code cache sizes up to the page size. In particular, Solaris is moving
  // to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "Code heap", InitialCodeCacheSize, CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->is_alive()) {
      nm->clear_inline_caches();
    }
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies works correctly only if no safepoint
  // can happen
  No_Safepoint_Verifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->is_alive()) {
      if (nm->is_marked_for_deoptimization()) {
        // ...Already marked in the previous pass; don't count it again.
      } else if (nm->is_evol_dependent_on(dependee())) {
        ResourceMark rm;
        nm->mark_for_deoptimization();
        number_of_marked_CodeBlobs++;
      } else {
        // flush caches in case they refer to a redefined Method*
        nm->clear_inline_caches();
      }
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->is_alive()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->is_alive()) {
      if (nm->is_dependent_on_method(dependee)) {
        ResourceMark rm;
        nm->mark_for_deoptimization();
        number_of_marked_CodeBlobs++;
      }
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->is_alive()) {
      if (nm->is_marked_for_deoptimization()) {

        // If the nmethod has already been made non-entrant and it can be converted
        // then zombie it now. Otherwise make it non-entrant and it will eventually
        // be zombied when it is no longer seen on the stack. Note that the nmethod
        // might be "entrant" and not on the stack and so could be zombied immediately
        // but we can't tell because we don't track it on stack until it becomes
        // non-entrant.

        if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
          nm->make_zombie();
        } else {
          nm->make_not_entrant();
        }
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->is_alive()) {
      if (nm->is_marked_for_deoptimization()) {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(it) {
    CodeHeap* heap = *it;
    heap->verify();
    FOR_ALL_BLOBS(cb, heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);

  if (!heap->was_full() || print) {
    // Not yet reported for this heap, report
    heap->report_full();
    if (SegmentedCodeCache) {
      warning("CodeHeap for %s is full. Compiler has been disabled.", CodeCache::get_code_heap_name(code_blob_type));
      warning("Try increasing the code heap size using -XX:%s=",
              (code_blob_type == CodeBlobType::MethodNonProfiled) ? "NonProfiledCodeHeapSize" : "ProfiledCodeHeapSize");
    } else {
      warning("CodeCache is full. Compiler has been disabled.");
      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty.
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    ttyLocker ttyl;
    tty->print("%s", s.as_string());
  }
1016
1017 _codemem_full_count++;
1018 EventCodeCacheFull event;
1019 if (event.should_commit()) {
1020 event.set_codeBlobType((u1)code_blob_type);
1021 event.set_startAddress((u8)heap->low_boundary());
1022 event.set_commitedTopAddress((u8)heap->high());
1023 event.set_reservedTopAddress((u8)heap->high_boundary());
1024 event.set_entryCount(nof_blobs());
1025 event.set_methodCount(nof_nmethods());
1026 event.set_adaptorCount(nof_adapters());
1027 event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
1028 event.set_fullCount(_codemem_full_count);
1029 event.commit();
1030 }
1031 }
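
// For reference, the remedies suggested in the warnings above correspond to
// real command-line flags; the values below are illustrative only:
//
//   -XX:ReservedCodeCacheSize=256m        (non-segmented code cache)
//   -XX:NonProfiledCodeHeapSize=128m      (segmented, -XX:+SegmentedCodeCache)
//   -XX:ProfiledCodeHeapSize=96m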

void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;

  FOR_ALL_HEAPS(it) {
    CodeHeap* heap = *it;
    CodeBlob* cb;
    for (cb = (CodeBlob*)heap->first(); cb != NULL; cb = (CodeBlob*)heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}
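
// Worked example of the waste computation above (illustrative numbers,
// ignoring the HeapBlock header for simplicity): with a CodeCacheSegmentSize
// of 64 bytes, a CodeBlob of 1000 bytes is rounded up to 16 segments
// (16 * 64 = 1024 bytes), so 1024 - 1000 = 24 bytes count as wasted.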

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) { // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  FOR_ALL_HEAPS(it) {
    if (SegmentedCodeCache && Verbose) {
      tty->print_cr("-- Code heap '%s' --", (*it)->name());
    }
    FOR_ALL_BLOBS(cb, *it) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if(nm->is_alive()) { tty->print_cr(" alive"); }
          if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if(nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if(nm->is_alive()) { nmethodAlive++; }
        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->is_zombie()) { nmethodZombie++; }
        if(nm->is_unloaded()) { nmethodUnloaded++; }
        if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter;
  while(iter.next()) {
    nmethod* nm = iter.method();
    if(nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)",total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d",nmethodCount);
  tty->print_cr("\talive: %d",nmethodAlive);
  tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
  tty->print_cr("\tzombie: %d",nmethodZombie);
  tty->print_cr("\tunloaded: %d",nmethodUnloaded);
  tty->print_cr("\tjava: %d",nmethodJava);
  tty->print_cr("\tnative: %d",nmethodNative);
  tty->print_cr("runtime_stubs: %d",runtimeStubCount);
  tty->print_cr("adapters: %d",adapterCount);
  tty->print_cr("buffer blobs: %d",bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i=0; i<bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d",buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
  print_memory_overhead();
}
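
// Worked example of the histogram above (illustrative): with bucketSize = 512,
// an nmethod of 1300 bytes falls into bucket 1300 / 512 = 2 and is reported
// on the "1024 - 1536 bytes" row.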

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_HEAPS(it) {
    FOR_ALL_BLOBS(cb, *it) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_HEAPS(it) {
      FOR_ALL_BLOBS(cb, *it) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          OopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->size();
            map_size += set->heap_size();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  FOR_ALL_HEAPS(it) {
    CodeHeap* heap = (*it);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (SegmentedCodeCache) {
      st->print("Code heap '%s':", heap->name());
    } else {
      st->print("Code cache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));
    }
  }

  if (detailed) {
    log_state(st);
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}
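
// Sample output of print_summary() for the non-segmented case (addresses and
// sizes are illustrative):
//
//   Code cache: size=245760Kb used=1366Kb max_used=1943Kb free=244394Kb
//    bounds [0x00007f..., 0x00007f..., 0x00007f...]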

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity());
}
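
// log_state() deliberately prints attribute-style key='value' pairs, e.g.
// (illustrative values):
//
//   total_blobs='1234' nmethods='600' adapters='200' free_code_cache='250218496'
//
// so the output can be embedded in a larger log line or an XML element.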