31 #include "code/nmethod.hpp"
32 #include "code/pcDesc.hpp"
33 #include "compiler/compileBroker.hpp"
34 #include "gc_implementation/shared/markSweep.hpp"
35 #include "memory/allocation.inline.hpp"
36 #include "memory/gcLocker.hpp"
37 #include "memory/iterator.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "oops/method.hpp"
40 #include "oops/objArrayOop.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "runtime/handles.inline.hpp"
43 #include "runtime/arguments.hpp"
44 #include "runtime/icache.hpp"
45 #include "runtime/java.hpp"
46 #include "runtime/mutexLocker.hpp"
47 #include "services/memoryService.hpp"
48 #include "trace/tracing.hpp"
49 #include "utilities/xmlstream.hpp"
50
51 // Helper class for printing in CodeCache
52
53 class CodeBlob_sizes {
54 private:
55 int count;
56 int total_size;
57 int header_size;
58 int code_size;
59 int stub_size;
60 int relocation_size;
61 int scopes_oop_size;
62 int scopes_metadata_size;
63 int scopes_data_size;
64 int scopes_pcs_size;
65
66 public:
67 CodeBlob_sizes() {
68 count = 0;
69 total_size = 0;
70 header_size = 0;
71 code_size = 0;
72 stub_size = 0;
98 void add(CodeBlob* cb) {
99 count++;
100 total_size += cb->size();
101 header_size += cb->header_size();
102 relocation_size += cb->relocation_size();
103 if (cb->is_nmethod()) {
104 nmethod* nm = cb->as_nmethod_or_null();
105 code_size += nm->insts_size();
106 stub_size += nm->stub_size();
107
108 scopes_oop_size += nm->oops_size();
109 scopes_metadata_size += nm->metadata_size();
110 scopes_data_size += nm->scopes_data_size();
111 scopes_pcs_size += nm->scopes_pcs_size();
112 } else {
113 code_size += cb->code_size();
114 }
115 }
116 };
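// Illustrative usage sketch (not part of the original file): how CodeBlob_sizes
// is meant to be driven, mirroring CodeCache::print() further below. FOR_ALL_BLOBS
// is defined later in this file; is_empty() and print() are among the members
// elided from the listing above.
#if 0
static void example_tally_blob_sizes() {
  CodeBlob_sizes live;
  CodeBlob_sizes dead;
  FOR_ALL_BLOBS(p) {
    if (p->is_alive()) {
      live.add(p);
    } else {
      dead.add(p);
    }
  }
  if (!live.is_empty()) live.print("live");
  if (!dead.is_empty()) dead.print("dead");
}
#endif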
117
118 // CodeCache implementation
119
120 CodeHeap * CodeCache::_heap = new CodeHeap();
121 int CodeCache::_number_of_blobs = 0;
122 int CodeCache::_number_of_adapters = 0;
123 int CodeCache::_number_of_nmethods = 0;
124 int CodeCache::_number_of_nmethods_with_dependencies = 0;
125 bool CodeCache::_needs_cache_clean = false;
126 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
127
128 int CodeCache::_codemem_full_count = 0;
129
130 CodeBlob* CodeCache::first() {
131 assert_locked_or_safepoint(CodeCache_lock);
132 return (CodeBlob*)_heap->first();
133 }
134
135
136 CodeBlob* CodeCache::next(CodeBlob* cb) {
137 assert_locked_or_safepoint(CodeCache_lock);
138 return (CodeBlob*)_heap->next(cb);
139 }
140
141
142 CodeBlob* CodeCache::alive(CodeBlob *cb) {
143 assert_locked_or_safepoint(CodeCache_lock);
144 while (cb != NULL && !cb->is_alive()) cb = next(cb);
145 return cb;
146 }
147
148
149 nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
150 assert_locked_or_safepoint(CodeCache_lock);
151 while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
152 return (nmethod*)cb;
153 }
154
155 nmethod* CodeCache::first_nmethod() {
156 assert_locked_or_safepoint(CodeCache_lock);
157 CodeBlob* cb = first();
158 while (cb != NULL && !cb->is_nmethod()) {
159 cb = next(cb);
160 }
161 return (nmethod*)cb;
162 }
163
164 nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
165 assert_locked_or_safepoint(CodeCache_lock);
166 cb = next(cb);
167 while (cb != NULL && !cb->is_nmethod()) {
168 cb = next(cb);
169 }
170 return (nmethod*)cb;
171 }
172
173 static size_t maxCodeCacheUsed = 0;
174
175 CodeBlob* CodeCache::allocate(int size, bool is_critical) {
176 // Do not seize the CodeCache lock here--if the caller has not
177 // already done so, we are going to lose bigtime, since the code
178 // cache will contain a garbage CodeBlob until the caller can
179 // run the constructor for the CodeBlob subclass he is busy
180 // instantiating.
181 guarantee(size >= 0, "allocation request must be reasonable");
182 assert_locked_or_safepoint(CodeCache_lock);
183 CodeBlob* cb = NULL;
184 _number_of_blobs++;
185 while (true) {
186 cb = (CodeBlob*)_heap->allocate(size, is_critical);
187 if (cb != NULL) break;
188 if (!_heap->expand_by(CodeCacheExpansionSize)) {
189 // Expansion failed
190 return NULL;
191 }
192 if (PrintCodeCacheExtension) {
193 ResourceMark rm;
194 tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
195 (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
196 (address)_heap->high() - (address)_heap->low_boundary());
197 }
198 }
199 maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
200 (address)_heap->low_boundary()) - unallocated_capacity());
201 verify_if_often();
202 print_trace("allocation", cb, size);
203 return cb;
204 }
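// Hedged usage sketch (not part of the original file): the locking protocol the
// comment in allocate() describes. A creation helper takes CodeCache_lock, gets
// raw space, and runs the subclass constructor before the blob can be observed.
// "MyBlob" and its operator new (which would forward to CodeCache::allocate, the
// way BufferBlob's does) are hypothetical.
#if 0
MyBlob* MyBlob::create(int size) {
  MyBlob* blob = NULL;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MyBlob(size);  // operator new calls CodeCache::allocate(size)
  }
  return blob;  // may be NULL if the code cache is full
}
#endif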
205
206 void CodeCache::free(CodeBlob* cb) {
207 assert_locked_or_safepoint(CodeCache_lock);
208 verify_if_often();
209
210 print_trace("free", cb);
211 if (cb->is_nmethod()) {
212 _number_of_nmethods--;
213 if (((nmethod *)cb)->has_dependencies()) {
214 _number_of_nmethods_with_dependencies--;
215 }
216 }
217 if (cb->is_adapter_blob()) {
218 _number_of_adapters--;
219 }
220 _number_of_blobs--;
221
222 _heap->deallocate(cb);
223
224 verify_if_often();
225 assert(_number_of_blobs >= 0, "sanity check");
226 }
227
228
229 void CodeCache::commit(CodeBlob* cb) {
230 // this is called by nmethod::nmethod, which must already own CodeCache_lock
231 assert_locked_or_safepoint(CodeCache_lock);
232 if (cb->is_nmethod()) {
233 _number_of_nmethods++;
234 if (((nmethod *)cb)->has_dependencies()) {
235 _number_of_nmethods_with_dependencies++;
236 }
237 }
238 if (cb->is_adapter_blob()) {
239 _number_of_adapters++;
240 }
241
242 // flush the hardware I-cache
243 ICache::invalidate_range(cb->content_begin(), cb->content_size());
244 }
245
246
247 void CodeCache::flush() {
248 assert_locked_or_safepoint(CodeCache_lock);
249 Unimplemented();
250 }
251
252
253 // Iteration over CodeBlobs
254
255 #define FOR_ALL_BLOBS(var) for (CodeBlob *var = first() ; var != NULL; var = next(var) )
256 #define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
257 #define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
258
259
260 bool CodeCache::contains(void *p) {
261 // It should be ok to call contains without holding a lock
262 return _heap->contains(p);
263 }
264
265
266 // This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
267 // looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
268 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
269 CodeBlob* CodeCache::find_blob(void* start) {
270 CodeBlob* result = find_blob_unsafe(start);
271 if (result == NULL) return NULL;
272   // We could potentially look up non_entrant methods
273 guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
274 return result;
275 }
276
277 nmethod* CodeCache::find_nmethod(void* start) {
278 CodeBlob *cb = find_blob(start);
279 assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
280 return (nmethod*)cb;
281 }
282
283
284 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
285 assert_locked_or_safepoint(CodeCache_lock);
286 FOR_ALL_BLOBS(p) {
287 f(p);
288 }
289 }
290
291
292 void CodeCache::nmethods_do(void f(nmethod* nm)) {
293 assert_locked_or_safepoint(CodeCache_lock);
294 FOR_ALL_BLOBS(nm) {
295 if (nm->is_nmethod()) f((nmethod*)nm);
296 }
297 }
298
299 void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
300 assert_locked_or_safepoint(CodeCache_lock);
301 FOR_ALL_ALIVE_NMETHODS(nm) {
302 f(nm);
303 }
304 }
305
306 int CodeCache::alignment_unit() {
307 return (int)_heap->alignment_unit();
308 }
309
310
311 int CodeCache::alignment_offset() {
312 return (int)_heap->alignment_offset();
313 }
314
315
316 // Mark nmethods for unloading if they contain otherwise unreachable
317 // oops.
318 void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
319 assert_locked_or_safepoint(CodeCache_lock);
320 FOR_ALL_ALIVE_NMETHODS(nm) {
321 nm->do_unloading(is_alive, unloading_occurred);
322 }
323 }
324
325 void CodeCache::blobs_do(CodeBlobClosure* f) {
326 assert_locked_or_safepoint(CodeCache_lock);
327 FOR_ALL_ALIVE_BLOBS(cb) {
328 f->do_code_blob(cb);
329
330 #ifdef ASSERT
331 if (cb->is_nmethod())
332 ((nmethod*)cb)->verify_scavenge_root_oops();
333 #endif //ASSERT
334 }
335 }
336
337 // Walk the list of methods which might contain non-perm oops.
338 void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
339 assert_locked_or_safepoint(CodeCache_lock);
340 debug_only(mark_scavenge_root_nmethods());
341
342 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
343 debug_only(cur->clear_scavenge_root_marked());
344 assert(cur->scavenge_root_not_marked(), "");
345 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
346
347 bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
348 #ifndef PRODUCT
349 if (TraceScavenge) {
350 cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
351 }
352 #endif //PRODUCT
353 if (is_live) {
354 // Perform cur->oops_do(f), maybe just once per nmethod.
417 cur = next;
418 }
419
420 // Check for stray marks.
421 debug_only(verify_perm_nmethods(NULL));
422 }
423
424 #ifndef PRODUCT
425 void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
426 // While we are here, verify the integrity of the list.
427 mark_scavenge_root_nmethods();
428 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
429 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
430 cur->clear_scavenge_root_marked();
431 }
432 verify_perm_nmethods(f);
433 }
434
435 // Temporarily mark nmethods that are claimed to be on the non-perm list.
436 void CodeCache::mark_scavenge_root_nmethods() {
437 FOR_ALL_ALIVE_BLOBS(cb) {
438 if (cb->is_nmethod()) {
439 nmethod *nm = (nmethod*)cb;
440 assert(nm->scavenge_root_not_marked(), "clean state");
441 if (nm->on_scavenge_root_list())
442 nm->set_scavenge_root_marked();
443 }
444 }
445 }
446
447 // If the closure is given, run it on the unlisted nmethods.
448 // Also make sure that the effects of mark_scavenge_root_nmethods are gone.
449 void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
450 FOR_ALL_ALIVE_BLOBS(cb) {
451 bool call_f = (f_or_null != NULL);
452 if (cb->is_nmethod()) {
453 nmethod *nm = (nmethod*)cb;
454 assert(nm->scavenge_root_not_marked(), "must be already processed");
455 if (nm->on_scavenge_root_list())
456 call_f = false; // don't show this one to the client
457 nm->verify_scavenge_root_oops();
458 } else {
459 call_f = false; // not an nmethod
460 }
461 if (call_f) f_or_null->do_code_blob(cb);
462 }
463 }
464 #endif //PRODUCT
465
466
467 void CodeCache::gc_prologue() {
468 assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
469 }
470
471 void CodeCache::gc_epilogue() {
472 assert_locked_or_safepoint(CodeCache_lock);
473 FOR_ALL_ALIVE_BLOBS(cb) {
474 if (cb->is_nmethod()) {
475 nmethod *nm = (nmethod*)cb;
476 assert(!nm->is_unloaded(), "Tautology");
477 if (needs_cache_clean()) {
478 nm->cleanup_inline_caches();
479 }
480 DEBUG_ONLY(nm->verify());
481 nm->fix_oop_relocations();
482 }
483 }
484 set_needs_cache_clean(false);
485 prune_scavenge_root_nmethods();
486 assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
487
488 #ifdef ASSERT
489 // make sure that we aren't leaking icholders
490 int count = 0;
491 FOR_ALL_BLOBS(cb) {
492 if (cb->is_nmethod()) {
493 RelocIterator iter((nmethod*)cb);
494 while(iter.next()) {
495 if (iter.type() == relocInfo::virtual_call_type) {
496 if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
497 CompiledIC *ic = CompiledIC_at(iter.reloc());
498 if (TraceCompiledIC) {
499 tty->print("noticed icholder " INTPTR_FORMAT " ", ic->cached_icholder());
500 ic->print();
501 }
502 assert(ic->cached_icholder() != NULL, "must be non-NULL");
503 count++;
504 }
505 }
506 }
507 }
508 }
509
510 assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
511 CompiledICHolder::live_count(), "must agree");
512 #endif
513 }
514
515
516 void CodeCache::verify_oops() {
517 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
518 VerifyOopClosure voc;
519 FOR_ALL_ALIVE_BLOBS(cb) {
520 if (cb->is_nmethod()) {
521 nmethod *nm = (nmethod*)cb;
522 nm->oops_do(&voc);
523 nm->verify_oop_relocations();
524 }
525 }
526 }
527
528
529 address CodeCache::first_address() {
530 assert_locked_or_safepoint(CodeCache_lock);
531 return (address)_heap->low_boundary();
532 }
533
534
535 address CodeCache::last_address() {
536 assert_locked_or_safepoint(CodeCache_lock);
537 return (address)_heap->high();
538 }
539
540 /**
541 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
542 * is free, reverse_free_ratio() returns 4.
543 */
544 double CodeCache::reverse_free_ratio() {
545 double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
546 double max_capacity = (double)CodeCache::max_capacity();
547 return max_capacity / unallocated_capacity;
548 }
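// Worked example (illustrative, not part of the original file), ignoring
// CodeCacheMinimumFreeSpace for simplicity: with max_capacity() = 128M and 32M
// unallocated (25% free), reverse_free_ratio() = 128M / 32M = 4; once only
// 12.5% is free the result doubles to 8, so the value grows as the cache fills.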
549
550 void icache_init();
551
552 void CodeCache::initialize() {
553 assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
554 #ifdef COMPILER2
555 assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
556 #endif
557 assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
558   // This was originally just a check of the alignment, causing failure; instead, round
559 // the code cache to the page size. In particular, Solaris is moving to a larger
560 // default page size.
561 CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
562 InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
563 ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
564 if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
565 vm_exit_during_initialization("Could not reserve enough space for code cache");
566 }
567
568 MemoryService::add_code_heap_memory_pool(_heap);
569
570 // Initialize ICache flush mechanism
571 // This service is needed for os::register_code_area
572 icache_init();
573
574 // Give OS a chance to register generated code area.
575 // This is used on Windows 64 bit platforms to register
576 // Structured Exception Handlers for our generated code.
577 os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
578 }
579
580
581 void codeCache_init() {
582 CodeCache::initialize();
583 }
584
585 //------------------------------------------------------------------------------------------------
586
587 int CodeCache::number_of_nmethods_with_dependencies() {
588 return _number_of_nmethods_with_dependencies;
589 }
590
591 void CodeCache::clear_inline_caches() {
592 assert_locked_or_safepoint(CodeCache_lock);
593 FOR_ALL_ALIVE_NMETHODS(nm) {
594 nm->clear_inline_caches();
595 }
596 }
597
598 #ifndef PRODUCT
599 // used to keep track of how much time is spent in mark_for_deoptimization
600 static elapsedTimer dependentCheckTime;
601 static int dependentCheckCount = 0;
602 #endif // PRODUCT
603
604
605 int CodeCache::mark_for_deoptimization(DepChange& changes) {
606 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
607
608 #ifndef PRODUCT
609 dependentCheckTime.start();
610 dependentCheckCount++;
611 #endif // PRODUCT
612
613 int number_of_marked_CodeBlobs = 0;
614
615 // search the hierarchy looking for nmethods which are affected by the loading of this class
616
617 // then search the interfaces this class implements looking for nmethods
618   // which might be dependent on the fact that an interface only had one
619 // implementor.
620
621 { No_Safepoint_Verifier nsv;
622 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
623 Klass* d = str.klass();
624 number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
625 }
626 }
627
628 if (VerifyDependencies) {
629 // Turn off dependency tracing while actually testing deps.
630 NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
631 FOR_ALL_ALIVE_NMETHODS(nm) {
632 if (!nm->is_marked_for_deoptimization() &&
633 nm->check_all_dependencies()) {
634 ResourceMark rm;
635 tty->print_cr("Should have been marked for deoptimization:");
636 changes.print();
637 nm->print();
638 nm->print_dependencies();
639 }
640 }
641 }
642
643 #ifndef PRODUCT
644 dependentCheckTime.stop();
645 #endif // PRODUCT
646
647 return number_of_marked_CodeBlobs;
648 }
649
650
651 #ifdef HOTSWAP
652 int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
653 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
654 int number_of_marked_CodeBlobs = 0;
655
656 // Deoptimize all methods of the evolving class itself
657 Array<Method*>* old_methods = dependee->methods();
658 for (int i = 0; i < old_methods->length(); i++) {
659 ResourceMark rm;
660 Method* old_method = old_methods->at(i);
661 nmethod *nm = old_method->code();
662 if (nm != NULL) {
663 nm->mark_for_deoptimization();
664 number_of_marked_CodeBlobs++;
665 }
666 }
667
668 FOR_ALL_ALIVE_NMETHODS(nm) {
669 if (nm->is_marked_for_deoptimization()) {
670 // ...Already marked in the previous pass; don't count it again.
671 } else if (nm->is_evol_dependent_on(dependee())) {
672 ResourceMark rm;
673 nm->mark_for_deoptimization();
674 number_of_marked_CodeBlobs++;
675 } else {
676 // flush caches in case they refer to a redefined Method*
677 nm->clear_inline_caches();
678 }
679 }
680
681 return number_of_marked_CodeBlobs;
682 }
683 #endif // HOTSWAP
684
685
686 // Deoptimize all methods
687 void CodeCache::mark_all_nmethods_for_deoptimization() {
688 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
689 FOR_ALL_ALIVE_NMETHODS(nm) {
690 nm->mark_for_deoptimization();
691 }
692 }
693
694
695 int CodeCache::mark_for_deoptimization(Method* dependee) {
696 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
697 int number_of_marked_CodeBlobs = 0;
698
699 FOR_ALL_ALIVE_NMETHODS(nm) {
700 if (nm->is_dependent_on_method(dependee)) {
701 ResourceMark rm;
702 nm->mark_for_deoptimization();
703 number_of_marked_CodeBlobs++;
704 }
705 }
706
707 return number_of_marked_CodeBlobs;
708 }
709
710 void CodeCache::make_marked_nmethods_zombies() {
711 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
712 FOR_ALL_ALIVE_NMETHODS(nm) {
713 if (nm->is_marked_for_deoptimization()) {
714
715 // If the nmethod has already been made non-entrant and it can be converted
716 // then zombie it now. Otherwise make it non-entrant and it will eventually
717 // be zombied when it is no longer seen on the stack. Note that the nmethod
718 // might be "entrant" and not on the stack and so could be zombied immediately
719 // but we can't tell because we don't track it on stack until it becomes
720 // non-entrant.
721
722 if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
723 nm->make_zombie();
724 } else {
725 nm->make_not_entrant();
726 }
727 }
728 }
729 }
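// Illustrative summary (not part of the original file) of the nmethod state
// progression the comment above relies on; see nmethod.hpp for the
// authoritative definition:
//
//   in_use --> not_entrant --> zombie --> unloaded/freed
//
// make_zombie() is only safe once no activation of the nmethod can still be on
// a stack; otherwise make_not_entrant() is used and the sweeper zombifies the
// nmethod later.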
730
731 void CodeCache::make_marked_nmethods_not_entrant() {
732 assert_locked_or_safepoint(CodeCache_lock);
733 FOR_ALL_ALIVE_NMETHODS(nm) {
734 if (nm->is_marked_for_deoptimization()) {
735 nm->make_not_entrant();
736 }
737 }
738 }
739
740 void CodeCache::verify() {
741 _heap->verify();
742 FOR_ALL_ALIVE_BLOBS(p) {
743 p->verify();
744 }
745 }
746
747 void CodeCache::report_codemem_full() {
748 _codemem_full_count++;
749 EventCodeCacheFull event;
750 if (event.should_commit()) {
751 event.set_startAddress((u8)low_bound());
752 event.set_commitedTopAddress((u8)high());
753 event.set_reservedTopAddress((u8)high_bound());
754 event.set_entryCount(nof_blobs());
755 event.set_methodCount(nof_nmethods());
756 event.set_adaptorCount(nof_adapters());
757 event.set_unallocatedCapacity(unallocated_capacity()/K);
758 event.set_fullCount(_codemem_full_count);
759 event.commit();
760 }
761 }
762
763 //------------------------------------------------------------------------------------------------
764 // Non-product version
765
766 #ifndef PRODUCT
767
768 void CodeCache::verify_if_often() {
769 if (VerifyCodeCacheOften) {
770 _heap->verify();
771 }
772 }
773
774 void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
775 if (PrintCodeCache2) { // Need to add a new flag
776 ResourceMark rm;
777 if (size == 0) size = cb->size();
778 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
779 }
780 }
781
782 void CodeCache::print_internals() {
783 int nmethodCount = 0;
784 int runtimeStubCount = 0;
785 int adapterCount = 0;
786 int deoptimizationStubCount = 0;
787 int uncommonTrapStubCount = 0;
788 int bufferBlobCount = 0;
789 int total = 0;
790 int nmethodAlive = 0;
791 int nmethodNotEntrant = 0;
792 int nmethodZombie = 0;
793 int nmethodUnloaded = 0;
794 int nmethodJava = 0;
795 int nmethodNative = 0;
796 int maxCodeSize = 0;
797 ResourceMark rm;
798
799 CodeBlob *cb;
800 for (cb = first(); cb != NULL; cb = next(cb)) {
801 total++;
802 if (cb->is_nmethod()) {
803 nmethod* nm = (nmethod*)cb;
804
805 if (Verbose && nm->method() != NULL) {
806 ResourceMark rm;
807 char *method_name = nm->method()->name_and_sig_as_C_string();
808 tty->print("%s", method_name);
809 if(nm->is_alive()) { tty->print_cr(" alive"); }
810 if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
811 if(nm->is_zombie()) { tty->print_cr(" zombie"); }
812 }
813
814 nmethodCount++;
815
816 if(nm->is_alive()) { nmethodAlive++; }
817 if(nm->is_not_entrant()) { nmethodNotEntrant++; }
818 if(nm->is_zombie()) { nmethodZombie++; }
819 if(nm->is_unloaded()) { nmethodUnloaded++; }
820 if(nm->is_native_method()) { nmethodNative++; }
821
822 if(nm->method() != NULL && nm->is_java_method()) {
823 nmethodJava++;
824 if (nm->insts_size() > maxCodeSize) {
825 maxCodeSize = nm->insts_size();
826 }
827 }
828 } else if (cb->is_runtime_stub()) {
829 runtimeStubCount++;
830 } else if (cb->is_deoptimization_stub()) {
831 deoptimizationStubCount++;
832 } else if (cb->is_uncommon_trap_stub()) {
833 uncommonTrapStubCount++;
834 } else if (cb->is_adapter_blob()) {
835 adapterCount++;
836 } else if (cb->is_buffer_blob()) {
837 bufferBlobCount++;
838 }
839 }
840
841 int bucketSize = 512;
842 int bucketLimit = maxCodeSize / bucketSize + 1;
843 int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
844 memset(buckets,0,sizeof(int) * bucketLimit);
845
846 for (cb = first(); cb != NULL; cb = next(cb)) {
847 if (cb->is_nmethod()) {
848 nmethod* nm = (nmethod*)cb;
849 if(nm->is_java_method()) {
850 buckets[nm->insts_size() / bucketSize]++;
851 }
852 }
853 }
854 tty->print_cr("Code Cache Entries (total of %d)",total);
855 tty->print_cr("-------------------------------------------------");
856 tty->print_cr("nmethods: %d",nmethodCount);
857 tty->print_cr("\talive: %d",nmethodAlive);
858 tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
859 tty->print_cr("\tzombie: %d",nmethodZombie);
860 tty->print_cr("\tunloaded: %d",nmethodUnloaded);
861 tty->print_cr("\tjava: %d",nmethodJava);
862 tty->print_cr("\tnative: %d",nmethodNative);
863 tty->print_cr("runtime_stubs: %d",runtimeStubCount);
864 tty->print_cr("adapters: %d",adapterCount);
865 tty->print_cr("buffer blobs: %d",bufferBlobCount);
866 tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
867 tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
868 tty->print_cr("\nnmethod size distribution (non-zombie java)");
869 tty->print_cr("-------------------------------------------------");
870
871 for(int i=0; i<bucketLimit; i++) {
872 if(buckets[i] != 0) {
873 tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
874 tty->fill_to(40);
875 tty->print_cr("%d",buckets[i]);
876 }
877 }
878
879 FREE_C_HEAP_ARRAY(int, buckets, mtCode);
880 }
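// Worked example of the histogram above (illustrative, not part of the original
// file): with bucketSize = 512, an nmethod whose insts_size() is 1300 increments
// buckets[1300 / 512] == buckets[2], reported as the "1024 - 1536 bytes" row.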
881
882 #endif // !PRODUCT
883
884 void CodeCache::print() {
885 print_summary(tty);
886
887 #ifndef PRODUCT
888 if (!Verbose) return;
889
890 CodeBlob_sizes live;
891 CodeBlob_sizes dead;
892
893 FOR_ALL_BLOBS(p) {
894 if (!p->is_alive()) {
895 dead.add(p);
896 } else {
897 live.add(p);
898 }
899 }
900
901 tty->print_cr("CodeCache:");
902
903 tty->print_cr("nmethod dependency checking time %f", dependentCheckTime.seconds(),
904 dependentCheckTime.seconds() / dependentCheckCount);
905
906 if (!live.is_empty()) {
907 live.print("live");
908 }
909 if (!dead.is_empty()) {
910 dead.print("dead");
911 }
912
913
914 if (WizardMode) {
915 // print the oop_map usage
916 int code_size = 0;
917 int number_of_blobs = 0;
918 int number_of_oop_maps = 0;
919 int map_size = 0;
920 FOR_ALL_BLOBS(p) {
921 if (p->is_alive()) {
922 number_of_blobs++;
923 code_size += p->code_size();
924 OopMapSet* set = p->oop_maps();
925 if (set != NULL) {
926 number_of_oop_maps += set->size();
927 map_size += set->heap_size();
928 }
929 }
930 }
931 tty->print_cr("OopMaps");
932 tty->print_cr(" #blobs = %d", number_of_blobs);
933 tty->print_cr(" code size = %d", code_size);
934 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
935 tty->print_cr(" map size = %d", map_size);
936 }
937
938 #endif // !PRODUCT
939 }
940
941 void CodeCache::print_summary(outputStream* st, bool detailed) {
942 size_t total = (_heap->high_boundary() - _heap->low_boundary());
943 st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
944 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
945 total/K, (total - unallocated_capacity())/K,
946 maxCodeCacheUsed/K, unallocated_capacity()/K);
947
948 if (detailed) {
949 st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
950 _heap->low_boundary(),
951 _heap->high(),
952 _heap->high_boundary());
953 st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
954 " adapters=" UINT32_FORMAT,
955 nof_blobs(), nof_nmethods(), nof_adapters());
956 st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
957 "enabled" : Arguments::mode() == Arguments::_int ?
958 "disabled (interpreter mode)" :
959 "disabled (not enough contiguous free space left)");
960 }
961 }
962
963 void CodeCache::log_state(outputStream* st) {
964 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
965 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
966 nof_blobs(), nof_nmethods(), nof_adapters(),
967 unallocated_capacity());
968 }
969
// ================================================================================================
// Updated version of the file: the single global CodeHeap is replaced by multiple CodeHeaps
// (non-profiled methods, profiled methods, and non-methods), iterated via the macros below.
// ================================================================================================
31 #include "code/nmethod.hpp"
32 #include "code/pcDesc.hpp"
33 #include "compiler/compileBroker.hpp"
34 #include "gc_implementation/shared/markSweep.hpp"
35 #include "memory/allocation.inline.hpp"
36 #include "memory/gcLocker.hpp"
37 #include "memory/iterator.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "oops/method.hpp"
40 #include "oops/objArrayOop.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "runtime/handles.inline.hpp"
43 #include "runtime/arguments.hpp"
44 #include "runtime/icache.hpp"
45 #include "runtime/java.hpp"
46 #include "runtime/mutexLocker.hpp"
47 #include "services/memoryService.hpp"
48 #include "trace/tracing.hpp"
49 #include "utilities/xmlstream.hpp"
50
51
52 // Helper class for printing in CodeCache
53 class CodeBlob_sizes {
54 private:
55 int count;
56 int total_size;
57 int header_size;
58 int code_size;
59 int stub_size;
60 int relocation_size;
61 int scopes_oop_size;
62 int scopes_metadata_size;
63 int scopes_data_size;
64 int scopes_pcs_size;
65
66 public:
67 CodeBlob_sizes() {
68 count = 0;
69 total_size = 0;
70 header_size = 0;
71 code_size = 0;
72 stub_size = 0;
98 void add(CodeBlob* cb) {
99 count++;
100 total_size += cb->size();
101 header_size += cb->header_size();
102 relocation_size += cb->relocation_size();
103 if (cb->is_nmethod()) {
104 nmethod* nm = cb->as_nmethod_or_null();
105 code_size += nm->insts_size();
106 stub_size += nm->stub_size();
107
108 scopes_oop_size += nm->oops_size();
109 scopes_metadata_size += nm->metadata_size();
110 scopes_data_size += nm->scopes_data_size();
111 scopes_pcs_size += nm->scopes_pcs_size();
112 } else {
113 code_size += cb->code_size();
114 }
115 }
116 };
117
118 // Iterate over all CodeHeaps
119 #define FOR_ALL_HEAPS(it) for (GrowableArrayIterator<CodeHeap*> it = _heaps->begin(); it != _heaps->end(); ++it)
120 // Iterate over all CodeHeaps containing nmethods
121 #define FOR_ALL_METHOD_HEAPS(it) for (GrowableArrayFilterIterator<CodeHeap*, IsMethodPredicate> it(_heaps->begin(), IsMethodPredicate()); it != _heaps->end(); ++it)
122 // Iterate over all CodeBlobs (cb) on the given CodeHeap
123 #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
124 // Iterate over all alive CodeBlobs (cb) on the given CodeHeap
125 #define FOR_ALL_ALIVE_BLOBS(cb, heap) for (CodeBlob* cb = first_alive_blob(heap); cb != NULL; cb = next_alive_blob(heap, cb))
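// Illustrative sketch (not part of the original file) of how the iterators above
// compose; IsMethodPredicate is assumed to be the heap filter used by
// FOR_ALL_METHOD_HEAPS.
#if 0
static int count_alive_nmethods() {
  int n = 0;
  FOR_ALL_METHOD_HEAPS(it) {        // only heaps that hold nmethods
    FOR_ALL_ALIVE_BLOBS(cb, *it) {  // only blobs still alive in that heap
      n++;
    }
  }
  return n;
}
#endif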
126
127 address CodeCache::_low_bound = 0;
128 address CodeCache::_high_bound = 0;
129 int CodeCache::_number_of_blobs = 0;
130 int CodeCache::_number_of_adapters = 0;
131 int CodeCache::_number_of_nmethods = 0;
132 int CodeCache::_number_of_nmethods_with_dependencies = 0;
133 bool CodeCache::_needs_cache_clean = false;
134 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
135 int CodeCache::_codemem_full_count = 0;
136
137 // Initialize array of CodeHeaps
138 GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (3, true);
139
140 void CodeCache::initialize_heaps() {
141   // We do not need the profiled CodeHeap; use all space for the non-profiled CodeHeap
142   if (!heap_available(CodeBlobType::MethodProfile)) {
143 FLAG_SET_DEFAULT(NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
144 }
145
146   // Compute reserved sizes of CodeHeaps; we have
147 // ReservedCodeCacheSize = non_method_size + NonProfiledCodeHeapSize + ProfiledCodeHeapSize
148 // where by default NonProfiledCodeHeapSize is approximately ProfiledCodeHeapSize * 2
149 size_t no_profile_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
150 size_t profile_size = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
151 size_t non_method_size = ReservedCodeSpace::allocation_align_size_up(ReservedCodeCacheSize - (no_profile_size + profile_size));
152
153 // Compute initial sizes of CodeHeaps
154 size_t init_non_method_size = MIN2(InitialCodeCacheSize, non_method_size);
155 size_t init_no_profile_size = MIN2(InitialCodeCacheSize, no_profile_size);
156 size_t init_profile_size = MIN2(InitialCodeCacheSize, profile_size);
157
158   // Reserve one contiguous chunk of memory for CodeHeaps and split it into
159 // parts for the individual heaps. The memory layout looks like this:
160 // ---------- high -----------
161 // Non-methods
162 // Tier 2 and tier 3 methods
163 // Tier 1 and Tier 4 methods
164 // ---------- low ------------
165 ReservedCodeSpace rs = reserve_heap_memory(no_profile_size + profile_size + non_method_size);
166 ReservedSpace no_profile_space = rs.first_part(no_profile_size);
167 ReservedSpace rest = rs.last_part(no_profile_size);
168 ReservedSpace profile_space = rest.first_part(profile_size);
169 ReservedSpace non_method_space = rest.last_part(profile_size);
170
171 // Tier 1 and tier 4 methods (+ native)
172 add_heap(no_profile_space, "Tier 1 and tier 4 methods", init_no_profile_size, CodeBlobType::MethodNoProfile);
173 // Tier 2 and tier 3 methods
174 add_heap(profile_space, "Tier 2 and tier 3 methods", init_profile_size, CodeBlobType::MethodProfile);
175 // Non-methods
176 add_heap(non_method_space, "Non-methods", init_non_method_size, CodeBlobType::NonMethod);
177 }
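// Worked sizing example (hypothetical numbers, not defaults): with
// ReservedCodeCacheSize = 128M, NonProfiledCodeHeapSize = 84M and
// ProfiledCodeHeapSize = 42M, the non-method heap receives the remainder:
//   non_method_size = 128M - (84M + 42M) = 2M
// and the reservation is split bottom-up into 84M | 42M | 2M, matching the
// layout comment above.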
178
179 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
180 // Determine alignment
181 const size_t page_size = os::can_execute_large_page_memory() ?
182 os::page_size_for_region(InitialCodeCacheSize, size, 8) :
183 os::vm_page_size();
184 const size_t granularity = os::vm_allocation_granularity();
185 const size_t r_align = MAX2(page_size, granularity);
186 const size_t r_size = align_size_up(size, r_align);
187 const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
188 MAX2(page_size, granularity);
189
190 ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
191
192 // Initialize bounds
193 _low_bound = (address)rs.base();
194 _high_bound = _low_bound + rs.size();
195 guarantee(low_bound() < high_bound(), "Bound check");
196
197 return rs;
198 }
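// Worked alignment example (illustrative): with a 4K vm_page_size and a 64K
// allocation granularity, r_align = MAX2(4K, 64K) = 64K and the requested size
// is rounded up to a 64K multiple; rs_align stays 0 because page_size equals
// os::vm_page_size(), so no large-page alignment is requested.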
199
200 bool CodeCache::heap_available(int code_blob_type) {
201 if (TieredCompilation || code_blob_type == CodeBlobType::NonMethod) {
202 // Use all heaps for TieredCompilation
203 return true;
204 } else {
205 // Without TieredCompilation we only need the non-profiled heap
206 return (code_blob_type == CodeBlobType::MethodNoProfile);
207 }
208 }
209
210 void CodeCache::add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type) {
211 // Check if heap is needed
212 if (!heap_available(code_blob_type)) {
213 return;
214 }
215
216 // Create CodeHeap
217 CodeHeap* heap = new CodeHeap(name, code_blob_type);
218 _heaps->append(heap);
219
220 // Reserve Space
221 size_initial = round_to(size_initial, os::vm_page_size());
222
223 if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
224 vm_exit_during_initialization("Could not reserve enough space for code cache");
225 }
226
227 // Register the CodeHeap
228 MemoryService::add_code_heap_memory_pool(heap, name);
229 }
230
231 CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
232 FOR_ALL_HEAPS(it) {
233 if ((*it)->accepts(code_blob_type)) {
234 return (*it);
235 }
236 }
237 return NULL;
238 }
239
240 CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
241 assert_locked_or_safepoint(CodeCache_lock);
242 if (heap != NULL) {
243 return (CodeBlob*)heap->first();
244 }
245 return NULL;
246 }
247
248 CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
249 assert_locked_or_safepoint(CodeCache_lock);
250 if (heap != NULL) {
251 return (CodeBlob*)heap->next(cb);
252 }
253 return NULL;
254 }
255
256 CodeBlob* CodeCache::first_alive_blob(CodeHeap* heap) {
257 assert_locked_or_safepoint(CodeCache_lock);
258 CodeBlob* cb = first_blob(heap);
259 while (cb != NULL && !cb->is_alive()) {
260 cb = next_blob(heap, cb);
261 }
262 return cb;
263 }
264
265 CodeBlob* CodeCache::next_alive_blob(CodeHeap* heap, CodeBlob* cb) {
266 assert_locked_or_safepoint(CodeCache_lock);
267 cb = next_blob(heap, cb);
268 while (cb != NULL && !cb->is_alive()) {
269 cb = next_blob(heap, cb);
270 }
271 return cb;
272 }
273
274 CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
275 // Do not seize the CodeCache lock here--if the caller has not
276 // already done so, we are going to lose bigtime, since the code
277 // cache will contain a garbage CodeBlob until the caller can
278 // run the constructor for the CodeBlob subclass he is busy
279 // instantiating.
280 guarantee(size >= 0, "allocation request must be reasonable");
281 assert_locked_or_safepoint(CodeCache_lock);
282 CodeBlob* cb = NULL;
283 _number_of_blobs++;
284
285 // Get CodeHeap for the given CodeBlobType
286 CodeHeap* heap = get_code_heap(code_blob_type);
287 assert (heap != NULL, "Heap exists");
288
289 while (true) {
290 cb = (CodeBlob*)heap->allocate(size, is_critical);
291 if (cb != NULL) break;
292 if (!heap->expand_by(CodeCacheExpansionSize)) {
293 // Expansion failed
294 return NULL;
295 }
296 if (PrintCodeCacheExtension) {
297 ResourceMark rm;
298 tty->print_cr("CodeHeap '%s' extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
299 heap->name(), (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
300 (address)heap->high() - (address)heap->low_boundary());
301 }
302 }
303
304 verify_if_often();
305 print_trace("allocation", cb, size);
306
307 return cb;
308 }
309
310 void CodeCache::free(CodeBlob* cb, int code_blob_type) {
311 assert_locked_or_safepoint(CodeCache_lock);
312 verify_if_often();
313
314 print_trace("free", cb);
315 if (cb->is_nmethod()) {
316 _number_of_nmethods--;
317 if (((nmethod *)cb)->has_dependencies()) {
318 _number_of_nmethods_with_dependencies--;
319 }
320 }
321 if (cb->is_adapter_blob()) {
322 _number_of_adapters--;
323 }
324 _number_of_blobs--;
325
326 // Get heap for given CodeBlobType and deallocate
327 get_code_heap(code_blob_type)->deallocate(cb);
328
329 verify_if_often();
330 assert(_number_of_blobs >= 0, "sanity check");
331 }
332
333 void CodeCache::commit(CodeBlob* cb) {
334 // this is called by nmethod::nmethod, which must already own CodeCache_lock
335 assert_locked_or_safepoint(CodeCache_lock);
336 if (cb->is_nmethod()) {
337 _number_of_nmethods++;
338 if (((nmethod *)cb)->has_dependencies()) {
339 _number_of_nmethods_with_dependencies++;
340 }
341 }
342 if (cb->is_adapter_blob()) {
343 _number_of_adapters++;
344 }
345
346 // flush the hardware I-cache
347 ICache::invalidate_range(cb->content_begin(), cb->content_size());
348 }
349
350 bool CodeCache::contains(void *p) {
351 // It should be ok to call contains without holding a lock
352 FOR_ALL_HEAPS(it) {
353 if ((*it)->contains(p)) {
354 return true;
355 }
356 }
357 return false;
358 }
359
360 // This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
361 // looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
362 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
363 CodeBlob* CodeCache::find_blob(void* start) {
364 CodeBlob* result = find_blob_unsafe(start);
365 // We could potentially look up non_entrant methods
366 guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
367 return result;
368 }
369
370 // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
371 // what you are doing)
372 CodeBlob* CodeCache::find_blob_unsafe(void* start) {
373 // NMT can walk the stack before code cache is created
374 if (_heaps->first() == NULL) return NULL;
375
376 FOR_ALL_HEAPS(it) {
377 CodeBlob* result = (CodeBlob*) (*it)->find_start(start);
378 if (result != NULL && result->blob_contains((address)start)) {
379 return result;
380 }
381 }
382 return NULL;
383 }
384
385 nmethod* CodeCache::find_nmethod(void* start) {
386 CodeBlob* cb = find_blob(start);
387   assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
388 return (nmethod*)cb;
389 }
390
391 bool CodeCache::contains_nmethod(nmethod* nm) {
392 FOR_ALL_METHOD_HEAPS(it) {
393 if ((*it)->contains(nm)) {
394 return true;
395 }
396 }
397 return false;
398 }
399
400 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
401 assert_locked_or_safepoint(CodeCache_lock);
402 FOR_ALL_HEAPS(it) {
403 FOR_ALL_BLOBS(cb, *it) {
404 f(cb);
405 }
406 }
407 }
408
409 void CodeCache::nmethods_do(void f(nmethod* nm)) {
410 assert_locked_or_safepoint(CodeCache_lock);
411 FOR_ALL_METHOD_HEAPS(it) {
412 FOR_ALL_BLOBS(cb, *it) {
413 f((nmethod*)cb);
414 }
415 }
416 }
417
418 void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
419 assert_locked_or_safepoint(CodeCache_lock);
420 FOR_ALL_METHOD_HEAPS(it) {
421 FOR_ALL_ALIVE_BLOBS(cb, *it) {
422 f((nmethod*)cb);
423 }
424 }
425 }
426
427 int CodeCache::alignment_unit() {
428 return (int)_heaps->first()->alignment_unit();
429 }
430
431 int CodeCache::alignment_offset() {
432 return (int)_heaps->first()->alignment_offset();
433 }
434
435 // Mark nmethods for unloading if they contain otherwise unreachable oops.
436 void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
437 assert_locked_or_safepoint(CodeCache_lock);
438 FOR_ALL_METHOD_HEAPS(it) {
439 FOR_ALL_ALIVE_BLOBS(cb, *it) {
440 nmethod* nm = (nmethod*)cb;
441 nm->do_unloading(is_alive, unloading_occurred);
442 }
443 }
444 }
445
446 void CodeCache::blobs_do(CodeBlobClosure* f) {
447 assert_locked_or_safepoint(CodeCache_lock);
448 FOR_ALL_HEAPS(it) {
449 FOR_ALL_BLOBS(cb, *it) {
450 if (cb->is_alive()) {
451 f->do_code_blob(cb);
452
453 #ifdef ASSERT
454 if (cb->is_nmethod())
455 ((nmethod*)cb)->verify_scavenge_root_oops();
456 #endif //ASSERT
457 }
458 }
459 }
460 }
461
462 // Walk the list of methods which might contain non-perm oops.
463 void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
464 assert_locked_or_safepoint(CodeCache_lock);
465 debug_only(mark_scavenge_root_nmethods());
466
467 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
468 debug_only(cur->clear_scavenge_root_marked());
469 assert(cur->scavenge_root_not_marked(), "");
470 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
471
472 bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
473 #ifndef PRODUCT
474 if (TraceScavenge) {
475 cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
476 }
477 #endif //PRODUCT
478 if (is_live) {
479 // Perform cur->oops_do(f), maybe just once per nmethod.
542 cur = next;
543 }
544
545 // Check for stray marks.
546 debug_only(verify_perm_nmethods(NULL));
547 }
548
549 #ifndef PRODUCT
550 void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
551 // While we are here, verify the integrity of the list.
552 mark_scavenge_root_nmethods();
553 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
554 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
555 cur->clear_scavenge_root_marked();
556 }
557 verify_perm_nmethods(f);
558 }
559
560 // Temporarily mark nmethods that are claimed to be on the non-perm list.
561 void CodeCache::mark_scavenge_root_nmethods() {
562 FOR_ALL_METHOD_HEAPS(it) {
563 FOR_ALL_ALIVE_BLOBS(cb, *it) {
564 nmethod* nm = (nmethod*)cb;
565 assert(nm->scavenge_root_not_marked(), "clean state");
566 if (nm->on_scavenge_root_list())
567 nm->set_scavenge_root_marked();
568 }
569 }
570 }
571
572 // If the closure is given, run it on the unlisted nmethods.
573 // Also make sure that the effects of mark_scavenge_root_nmethods are gone.
574 void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
575 FOR_ALL_METHOD_HEAPS(it) {
576 FOR_ALL_ALIVE_BLOBS(cb, *it) {
577 nmethod* nm = (nmethod*)cb;
578 bool call_f = (f_or_null != NULL);
579 assert(nm->scavenge_root_not_marked(), "must be already processed");
580 if (nm->on_scavenge_root_list())
581 call_f = false; // don't show this one to the client
582 nm->verify_scavenge_root_oops();
583 if (call_f) f_or_null->do_code_blob(nm);
584 }
585 }
586 }
587 #endif //PRODUCT
588
589 void CodeCache::gc_prologue() {
590 assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
591 }
592
593 void CodeCache::gc_epilogue() {
594 assert_locked_or_safepoint(CodeCache_lock);
595 FOR_ALL_METHOD_HEAPS(it) {
596 FOR_ALL_ALIVE_BLOBS(cb, *it) {
597 nmethod* nm = (nmethod*)cb;
598 assert(!nm->is_unloaded(), "Tautology");
599 if (needs_cache_clean()) {
600 nm->cleanup_inline_caches();
601 }
602 DEBUG_ONLY(nm->verify());
603 nm->fix_oop_relocations();
604 }
605 }
606 set_needs_cache_clean(false);
607 prune_scavenge_root_nmethods();
608 assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
609
610 #ifdef ASSERT
611 // make sure that we aren't leaking icholders
612 int count = 0;
613 FOR_ALL_METHOD_HEAPS(it) {
614 FOR_ALL_BLOBS(cb, *it) {
615 RelocIterator iter((nmethod*)cb);
616 while(iter.next()) {
617 if (iter.type() == relocInfo::virtual_call_type) {
618 if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
619 CompiledIC *ic = CompiledIC_at(iter.reloc());
620 if (TraceCompiledIC) {
621 tty->print("noticed icholder " INTPTR_FORMAT " ", ic->cached_icholder());
622 ic->print();
623 }
624 assert(ic->cached_icholder() != NULL, "must be non-NULL");
625 count++;
626 }
627 }
628 }
629 }
630 }
631
632 assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
633 CompiledICHolder::live_count(), "must agree");
634 #endif
635 }
636
637 void CodeCache::verify_oops() {
638 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
639 VerifyOopClosure voc;
640 FOR_ALL_METHOD_HEAPS(it) {
641 FOR_ALL_ALIVE_BLOBS(cb, *it) {
642 nmethod* nm = (nmethod*)cb;
643 nm->oops_do(&voc);
644 nm->verify_oop_relocations();
645 }
646 }
647 }
648
649 size_t CodeCache::capacity() {
650 size_t cap = 0;
651 FOR_ALL_HEAPS(it) {
652 cap += (*it)->capacity();
653 }
654 return cap;
655 }
656
657 size_t CodeCache::unallocated_capacity() {
658 size_t unallocated_cap = 0;
659 FOR_ALL_HEAPS(it) {
660 unallocated_cap += (*it)->unallocated_capacity();
661 }
662 return unallocated_cap;
663 }
664
665 size_t CodeCache::max_capacity() {
666 size_t max_cap = 0;
667 FOR_ALL_HEAPS(it) {
668 max_cap += (*it)->max_capacity();
669 }
670 return max_cap;
671 }
672
673 /**
674 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
675 * is free, reverse_free_ratio() returns 4.
676 */
677 double CodeCache::reverse_free_ratio(int code_blob_type) {
678 CodeHeap* heap = get_code_heap(code_blob_type);
679 if (heap == NULL) {
680 return 0;
681 }
682 double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
683 double max_capacity = (double)heap->max_capacity();
684 return max_capacity / unallocated_capacity;
685 }
686
687 void icache_init();
688
689 void CodeCache::initialize() {
690 assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
691 #ifdef COMPILER2
692 assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
693 #endif
694 assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
695   // This was originally just a check of the alignment, causing failure; instead, round
696 // the code cache to the page size. In particular, Solaris is moving to a larger
697 // default page size.
698 CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
699
700 // Reserve space and create heaps
701 initialize_heaps();
702
703 // Initialize ICache flush mechanism
704 // This service is needed for os::register_code_area
705 icache_init();
706
707 // Give OS a chance to register generated code area.
708 // This is used on Windows 64 bit platforms to register
709 // Structured Exception Handlers for our generated code.
710 os::register_code_area((char*)low_bound(), (char*)high_bound());
711 }
712
713 void codeCache_init() {
714 CodeCache::initialize();
715 }
716
717 //------------------------------------------------------------------------------------------------
718
719 int CodeCache::number_of_nmethods_with_dependencies() {
720 return _number_of_nmethods_with_dependencies;
721 }
722
723 #ifndef PRODUCT
724 // used to keep track of how much time is spent in mark_for_deoptimization
725 static elapsedTimer dependentCheckTime;
726 static int dependentCheckCount = 0;
727 #endif // PRODUCT
728
729
730 int CodeCache::mark_for_deoptimization(DepChange& changes) {
731 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
732
733 #ifndef PRODUCT
734 dependentCheckTime.start();
735 dependentCheckCount++;
736 #endif // PRODUCT
737
738 int number_of_marked_CodeBlobs = 0;
739
740 // search the hierarchy looking for nmethods which are affected by the loading of this class
741
742 // then search the interfaces this class implements looking for nmethods
743   // which might be dependent on the fact that an interface only had one
744 // implementor.
745
746 { No_Safepoint_Verifier nsv;
747 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
748 Klass* d = str.klass();
749 number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
750 }
751 }
752
753 if (VerifyDependencies) {
754 // Turn off dependency tracing while actually testing deps.
755 NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
756 FOR_ALL_METHOD_HEAPS(it) {
757 FOR_ALL_ALIVE_BLOBS(cb, *it) {
758 nmethod* nm = (nmethod*)cb;
759 if (!nm->is_marked_for_deoptimization() &&
760 nm->check_all_dependencies()) {
761 ResourceMark rm;
762 tty->print_cr("Should have been marked for deoptimization:");
763 changes.print();
764 nm->print();
765 nm->print_dependencies();
766 }
767 }
768 }
769 }
770
771 #ifndef PRODUCT
772 dependentCheckTime.stop();
773 #endif // PRODUCT
774
775 return number_of_marked_CodeBlobs;
776 }
777
778
779 #ifdef HOTSWAP
780 int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
781 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
782 int number_of_marked_CodeBlobs = 0;
783
784 // Deoptimize all methods of the evolving class itself
785 Array<Method*>* old_methods = dependee->methods();
786 for (int i = 0; i < old_methods->length(); i++) {
787 ResourceMark rm;
788 Method* old_method = old_methods->at(i);
789 nmethod *nm = old_method->code();
790 if (nm != NULL) {
791 nm->mark_for_deoptimization();
792 number_of_marked_CodeBlobs++;
793 }
794 }
795
796 FOR_ALL_METHOD_HEAPS(it) {
797 FOR_ALL_ALIVE_BLOBS(cb, *it) {
798 nmethod* nm = (nmethod*)cb;
799 if (nm->is_marked_for_deoptimization()) {
800 // ...Already marked in the previous pass; don't count it again.
801 } else if (nm->is_evol_dependent_on(dependee())) {
802 ResourceMark rm;
803 nm->mark_for_deoptimization();
804 number_of_marked_CodeBlobs++;
805 } else {
806 // flush caches in case they refer to a redefined Method*
807 nm->clear_inline_caches();
808 }
809 }
810 }
811
812 return number_of_marked_CodeBlobs;
813 }
814 #endif // HOTSWAP
815
816
817 // Deoptimize all methods
818 void CodeCache::mark_all_nmethods_for_deoptimization() {
819 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
820 FOR_ALL_METHOD_HEAPS(it) {
821 FOR_ALL_ALIVE_BLOBS(cb, *it) {
822 nmethod* nm = (nmethod*)cb;
823 nm->mark_for_deoptimization();
824 }
825 }
826 }
827
828 int CodeCache::mark_for_deoptimization(Method* dependee) {
829 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
830 int number_of_marked_CodeBlobs = 0;
831
832 FOR_ALL_METHOD_HEAPS(it) {
833 FOR_ALL_ALIVE_BLOBS(cb, *it) {
834 nmethod* nm = (nmethod*)cb;
835 if (nm->is_dependent_on_method(dependee)) {
836 ResourceMark rm;
837 nm->mark_for_deoptimization();
838 number_of_marked_CodeBlobs++;
839 }
840 }
841 }
842
843 return number_of_marked_CodeBlobs;
844 }
845
846 void CodeCache::make_marked_nmethods_zombies() {
847 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
848 FOR_ALL_METHOD_HEAPS(it) {
849 FOR_ALL_ALIVE_BLOBS(cb, *it) {
850 nmethod* nm = (nmethod*)cb;
851 if (nm->is_marked_for_deoptimization()) {
852
853 // If the nmethod has already been made non-entrant and it can be converted
854 // then zombie it now. Otherwise make it non-entrant and it will eventually
855 // be zombied when it is no longer seen on the stack. Note that the nmethod
856 // might be "entrant" and not on the stack and so could be zombied immediately
857 // but we can't tell because we don't track it on stack until it becomes
858 // non-entrant.
859
860 if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
861 nm->make_zombie();
862 } else {
863 nm->make_not_entrant();
864 }
865 }
866 }
867 }
868 }
869
870 void CodeCache::make_marked_nmethods_not_entrant() {
871 assert_locked_or_safepoint(CodeCache_lock);
872 FOR_ALL_METHOD_HEAPS(it) {
873 FOR_ALL_ALIVE_BLOBS(cb, *it) {
874 nmethod* nm = (nmethod*)cb;
875 if (nm->is_marked_for_deoptimization()) {
876 nm->make_not_entrant();
877 }
878 }
879 }
880 }
881
882 void CodeCache::verify() {
883 assert_locked_or_safepoint(CodeCache_lock);
884 FOR_ALL_HEAPS(it) {
885 CodeHeap* heap = *it;
886 heap->verify();
887 FOR_ALL_BLOBS(cb, heap) {
888 if (cb->is_alive()) {
889 cb->verify();
890 }
891 }
892 }
893 }
894
895 // A CodeHeap is full. Print out warning and report event.
896 void CodeCache::report_codemem_full(int code_blob_type) {
897 // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
898 CodeHeap* heap = get_code_heap(code_blob_type);
899
900 if (!heap->was_full()) {
901 // Not yet reported for this heap, report
902 heap->report_full();
903 warning("CodeHeap for %s is full. Compiler has been disabled.", CodeCache::get_heap_name(code_blob_type));
904 warning("Try increasing the code heap size using -XX:%s=",
905 (code_blob_type == CodeBlobType::MethodNoProfile) ? "NonProfiledCodeHeapSize" : "ProfiledCodeHeapSize");
906
907 ResourceMark rm;
908 stringStream s;
909 // Dump CodeCache summary into a buffer before locking the tty
910 {
911 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
912 print_summary(&s, true);
913 }
914 ttyLocker ttyl;
915 tty->print(s.as_string());
916 }
917
918 _codemem_full_count++;
919 EventCodeCacheFull event;
920 if (event.should_commit()) {
921 event.set_codeBlobType(code_blob_type);
922 event.set_startAddress((u8)heap->low_boundary());
923 event.set_commitedTopAddress((u8)heap->high());
924 event.set_reservedTopAddress((u8)heap->high_boundary());
925 event.set_entryCount(nof_blobs());
926 event.set_methodCount(nof_nmethods());
927 event.set_adaptorCount(nof_adapters());
928 event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
929 event.set_fullCount(_codemem_full_count);
930 event.commit();
931 }
932 }
933
934 //------------------------------------------------------------------------------------------------
935 // Non-product version
936
937 #ifndef PRODUCT
938
939 void CodeCache::verify_if_often() {
940 if (VerifyCodeCacheOften) {
941 FOR_ALL_HEAPS(it) {
942 (*it)->verify();
943 }
944 }
945 }
946
947 void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
948 if (PrintCodeCache2) { // Need to add a new flag
949 ResourceMark rm;
950 if (size == 0) size = cb->size();
951 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
952 }
953 }
954
955 void CodeCache::print_internals() {
956 int nmethodCount = 0;
957 int runtimeStubCount = 0;
958 int adapterCount = 0;
959 int deoptimizationStubCount = 0;
960 int uncommonTrapStubCount = 0;
961 int bufferBlobCount = 0;
962 int total = 0;
963 int nmethodAlive = 0;
964 int nmethodNotEntrant = 0;
965 int nmethodZombie = 0;
966 int nmethodUnloaded = 0;
967 int nmethodJava = 0;
968 int nmethodNative = 0;
969 int maxCodeSize = 0;
970 ResourceMark rm;
971
972 int i = 0;
973 FOR_ALL_HEAPS(it) {
974 if (Verbose) {
975 tty->print_cr("## Heap '%s' ##", (*it)->name());
976 }
977 FOR_ALL_BLOBS(cb, *it) {
978 total++;
979 if (cb->is_nmethod()) {
980 nmethod* nm = (nmethod*)cb;
981
982 if (Verbose && nm->method() != NULL) {
983 ResourceMark rm;
984 char *method_name = nm->method()->name_and_sig_as_C_string();
985 tty->print("%s %d", method_name, nm->comp_level());
986 if(nm->is_alive()) { tty->print_cr(" alive"); }
987 if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
988 if(nm->is_zombie()) { tty->print_cr(" zombie"); }
989 }
990
991 nmethodCount++;
992
993 if(nm->is_alive()) { nmethodAlive++; }
994 if(nm->is_not_entrant()) { nmethodNotEntrant++; }
995 if(nm->is_zombie()) { nmethodZombie++; }
996 if(nm->is_unloaded()) { nmethodUnloaded++; }
997 if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }
998
999 if(nm->method() != NULL && nm->is_java_method()) {
1000 nmethodJava++;
1001 if (nm->insts_size() > maxCodeSize) {
1002 maxCodeSize = nm->insts_size();
1003 }
1004 }
1005 } else if (cb->is_runtime_stub()) {
1006 runtimeStubCount++;
1007 } else if (cb->is_deoptimization_stub()) {
1008 deoptimizationStubCount++;
1009 } else if (cb->is_uncommon_trap_stub()) {
1010 uncommonTrapStubCount++;
1011 } else if (cb->is_adapter_blob()) {
1012 adapterCount++;
1013 } else if (cb->is_buffer_blob()) {
1014 bufferBlobCount++;
1015 }
1016 }
1017 }
1018
1019 int bucketSize = 512;
1020 int bucketLimit = maxCodeSize / bucketSize + 1;
1021 int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
1022 memset(buckets,0,sizeof(int) * bucketLimit);
1023
1024 FOR_ALL_METHOD_HEAPS(it) {
1025 FOR_ALL_BLOBS(cb, *it) {
1026 nmethod* nm = (nmethod*)cb;
1027 if(nm->method() != NULL && nm->is_java_method()) {
1028 buckets[nm->insts_size() / bucketSize]++;
1029 }
1030 }
1031 }
1032 tty->print_cr("Code Cache Entries (total of %d)",total);
1033 tty->print_cr("-------------------------------------------------");
1034 tty->print_cr("nmethods: %d",nmethodCount);
1035 tty->print_cr("\talive: %d",nmethodAlive);
1036 tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
1037 tty->print_cr("\tzombie: %d",nmethodZombie);
1038 tty->print_cr("\tunloaded: %d",nmethodUnloaded);
1039 tty->print_cr("\tjava: %d",nmethodJava);
1040 tty->print_cr("\tnative: %d",nmethodNative);
1041 tty->print_cr("runtime_stubs: %d",runtimeStubCount);
1042 tty->print_cr("adapters: %d",adapterCount);
1043 tty->print_cr("buffer blobs: %d",bufferBlobCount);
1044 tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
1045 tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
1046 tty->print_cr("\nnmethod size distribution (non-zombie java)");
1047 tty->print_cr("-------------------------------------------------");
1048
1049 for(int i = 0; i < bucketLimit; ++i) {
1050 if(buckets[i] != 0) {
1051 tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
1052 tty->fill_to(40);
1053 tty->print_cr("%d",buckets[i]);
1054 }
1055 }
1056
1057 FREE_C_HEAP_ARRAY(int, buckets, mtCode);
1058 }
1059
1060 #endif // !PRODUCT
1061
1062 void CodeCache::print() {
1063 print_summary(tty);
1064
1065 #ifndef PRODUCT
1066 if (!Verbose) return;
1067
1068 CodeBlob_sizes live;
1069 CodeBlob_sizes dead;
1070
1071 FOR_ALL_HEAPS(it) {
1072 FOR_ALL_BLOBS(cb, *it) {
1073 if (!cb->is_alive()) {
1074 dead.add(cb);
1075 } else {
1076 live.add(cb);
1077 }
1078 }
1079 }
1080
1081 tty->print_cr("CodeCache:");
1082
1083 tty->print_cr("nmethod dependency checking time %f", dependentCheckTime.seconds(),
1084 dependentCheckTime.seconds() / dependentCheckCount);
1085
1086 if (!live.is_empty()) {
1087 live.print("live");
1088 }
1089 if (!dead.is_empty()) {
1090 dead.print("dead");
1091 }
1092
1093 if (WizardMode) {
1094 // print the oop_map usage
1095 int code_size = 0;
1096 int number_of_blobs = 0;
1097 int number_of_oop_maps = 0;
1098 int map_size = 0;
1099 FOR_ALL_HEAPS(it) {
1100 FOR_ALL_BLOBS(cb, *it) {
1101 if (cb->is_alive()) {
1102 number_of_blobs++;
1103 code_size += cb->code_size();
1104 OopMapSet* set = cb->oop_maps();
1105 if (set != NULL) {
1106 number_of_oop_maps += set->size();
1107 map_size += set->heap_size();
1108 }
1109 }
1110 }
1111 }
1112 tty->print_cr("OopMaps");
1113 tty->print_cr(" #blobs = %d", number_of_blobs);
1114 tty->print_cr(" code size = %d", code_size);
1115 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1116 tty->print_cr(" map size = %d", map_size);
1117 }
1118
1119 #endif // !PRODUCT
1120 }
1121
1122 void CodeCache::print_summary(outputStream* st, bool detailed) {
1123 st->print_cr("CodeCache Summary:");
1124 FOR_ALL_HEAPS(it) {
1125 CodeHeap* heap = (*it);
1126 size_t total = (heap->high_boundary() - heap->low_boundary());
1127 st->print_cr("Heap '%s': size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1128 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1129 heap->name(), total/K, (total - heap->unallocated_capacity())/K,
1130 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);
1131
1132 if (detailed) {
1133 st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1134 heap->low_boundary(),
1135 heap->high(),
1136 heap->high_boundary());
1137
1138 }
1139 }
1140
1141 if (detailed) {
1142 log_state(st);
1143 st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
1144 "enabled" : Arguments::mode() == Arguments::_int ?
1145 "disabled (interpreter mode)" :
1146 "disabled (not enough contiguous free space left)");
1147 }
1148 }
1149
1150 void CodeCache::log_state(outputStream* st) {
1151 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1152 " adapters='" UINT32_FORMAT "'",
1153 nof_blobs(), nof_nmethods(), nof_adapters());
1154 }
1155