rev 8245 : 8078628: linux-zero does not build without precompiled header
Summary: add missing includes
Reviewed-by: dholmes, stefank
/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sweeper.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count = 0;
    total_size = 0;
    header_size = 0;
    code_size = 0;
    stub_size = 0;
    relocation_size = 0;
    scopes_oop_size = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size = 0;
  }

  int total() { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size * 100 / total_size,
                  relocation_size * 100 / total_size,
                  code_size * 100 / total_size,
                  stub_size * 100 / total_size,
                  scopes_oop_size * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size * 100 / total_size,
                  scopes_pcs_size * 100 / total_size);
  }

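  // Accumulate statistics for one CodeBlob. nmethods contribute a finer
  // breakdown (stubs, oops, metadata, scopes data, pc descriptors); all
  // other blobs only contribute their code size.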
  void add(CodeBlob* cb) {
    count++;
    total_size += cb->size();
    header_size += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))

address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
int CodeCache::_codemem_full_count = 0;

// Initialize array of CodeHeaps
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*>(CodeBlobType::All, true);

void CodeCache::initialize_heaps() {
  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Calculate default CodeHeap sizes if not set by user
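  // Illustrative example (hypothetical numbers, not the actual defaults):
  // with ReservedCodeCacheSize = 128M and NonNMethodCodeHeapSize = 8M after
  // adding the compiler buffers, the remaining 120M is split below into
  // ProfiledCodeHeapSize = 60M and NonProfiledCodeHeapSize = 60M.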
  if (!FLAG_IS_CMDLINE(NonNMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
      && !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
    // Increase default NonNMethodCodeHeapSize to account for compiler buffers
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + code_buffers_size);

    // Check if we have enough space for the non-nmethod code heap
    if (ReservedCodeCacheSize > NonNMethodCodeHeapSize) {
      // Use the default value for NonNMethodCodeHeapSize and one half of the
      // remaining size for non-profiled methods and one half for profiled methods
      size_t remaining_size = ReservedCodeCacheSize - NonNMethodCodeHeapSize;
      size_t profiled_size = remaining_size / 2;
      size_t non_profiled_size = remaining_size - profiled_size;
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
    }
  }

  // We do not need the profiled CodeHeap; use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
  }
  // We do not need the non-profiled CodeHeap; use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
  }

  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
  }
  guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  size_t non_method_size = align_size_up(NonNMethodCodeHeapSize, alignment);
  size_t profiled_size = align_size_down(ProfiledCodeHeapSize, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
  ReservedSpace non_method_space = rs.first_part(non_method_size);
  ReservedSpace rest = rs.last_part(non_method_size);
  ReservedSpace profiled_space = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}

size_t CodeCache::heap_alignment() {
  // If large page support is enabled, align code heaps according to large
  // page size to make sure that code cache is covered by large pages.
  const size_t page_size = os::can_execute_large_page_memory() ?
             os::page_size_for_region_unaligned(ReservedCodeCacheSize, 8) :
             os::vm_page_size();
  return MAX2(page_size, (size_t) os::vm_allocation_granularity());
}

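// Reserve a single contiguous chunk of virtual memory for all CodeHeaps and
// record the overall code cache bounds (_low_bound/_high_bound).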
ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
  // Determine alignment
  const size_t page_size = os::can_execute_large_page_memory() ?
          MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
               os::page_size_for_region_aligned(size, 8)) :
          os::vm_page_size();
  const size_t granularity = os::vm_allocation_granularity();
  const size_t r_align = MAX2(page_size, granularity);
  const size_t r_size = align_size_up(size, r_align);
  const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_size, granularity);

  ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);

  // Initialize bounds
  _low_bound = (address)rs.base();
  _high_bound = _low_bound + rs.size();

  return rs;
}

bool CodeCache::heap_available(int code_blob_type) {
  if (!SegmentedCodeCache) {
    // No segmentation: use a single code heap
    return (code_blob_type == CodeBlobType::All);
  } else if (Arguments::mode() == Arguments::_int) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {
    // No TieredCompilation: we only need the non-nmethod and non-profiled code heap
    return (code_blob_type == CodeBlobType::NonNMethod) ||
           (code_blob_type == CodeBlobType::MethodNonProfiled);
  }
}

const char* CodeCache::get_code_heap_flag_name(int code_blob_type) {
  switch (code_blob_type) {
    case CodeBlobType::NonNMethod:
      return "NonNMethodCodeHeapSize";
    case CodeBlobType::MethodNonProfiled:
      return "NonProfiledCodeHeapSize";
    case CodeBlobType::MethodProfiled:
      return "ProfiledCodeHeapSize";
  }
  ShouldNotReachHere();
  return NULL;
}

void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) {
  // Check if heap is needed
  if (!heap_available(code_blob_type)) {
    return;
  }

  // Create CodeHeap
  CodeHeap* heap = new CodeHeap(name, code_blob_type);
  _heaps->append(heap);

  // Reserve Space
  size_t size_initial = MIN2(InitialCodeCacheSize, rs.size());
  size_initial = round_to(size_initial, os::vm_page_size());
  if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  // Register the CodeHeap
  MemoryService::add_code_heap_memory_pool(heap, name);
}

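// Returns the CodeHeap containing the given CodeBlob. Every blob lives in
// exactly one heap, so failing to find one is a fatal error.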
CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
  assert(cb != NULL, "CodeBlob is null");
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(cb)) {
      return *heap;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

CodeHeap* CodeCache::get_code_heap(int code_blob_type) {
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->accepts(code_blob_type)) {
      return *heap;
    }
  }
  return NULL;
}

CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->first();
}

CodeBlob* CodeCache::first_blob(int code_blob_type) {
  if (heap_available(code_blob_type)) {
    return first_blob(get_code_heap(code_blob_type));
  } else {
    return NULL;
  }
}

CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(heap != NULL, "heap is null");
  return (CodeBlob*)heap->next(cb);
}

CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
  return next_blob(get_code_heap(cb), cb);
}

/**
 * Do not seize the CodeCache lock here--if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool strict) {
  // Possibly wakes up the sweeper thread.
  NMethodSweeper::notify(code_blob_type);
  assert_locked_or_safepoint(CodeCache_lock);
  assert(size > 0, err_msg_res("Code cache allocation request must be > 0 but is %d", size));
  if (size <= 0) {
    return NULL;
  }
  CodeBlob* cb = NULL;

  // Get CodeHeap for the given CodeBlobType
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  while (true) {
    cb = (CodeBlob*)heap->allocate(size);
    if (cb != NULL) break;
    if (!heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      if (SegmentedCodeCache && !strict) {
        // Fallback solution: Try to store code in another code heap.
        // Note that in the sweeper, we check the reverse_free_ratio of the code heap
        // and force stack scanning if less than 10% of the code heap is free.
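        // Fallback order: non-nmethod code may spill into the non-profiled heap
        // and cascade further from there; profiled and non-profiled methods may
        // only spill into each other's heap.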
        int type = code_blob_type;
        switch (type) {
          case CodeBlobType::NonNMethod:
            type = CodeBlobType::MethodNonProfiled;
            strict = false;   // Allow recursive search for other heaps
            break;
          case CodeBlobType::MethodProfiled:
            type = CodeBlobType::MethodNonProfiled;
            strict = true;
            break;
          case CodeBlobType::MethodNonProfiled:
            type = CodeBlobType::MethodProfiled;
            strict = true;
            break;
        }
        if (heap_available(type)) {
          return allocate(size, type, strict);
        }
      }
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CompileBroker::handle_full_code_cache(code_blob_type);
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      if (SegmentedCodeCache) {
        tty->print("%s", heap->name());
      } else {
        tty->print("CodeCache");
      }
      tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
                    (address)heap->high() - (address)heap->low_boundary());
    }
  }
  print_trace("allocation", cb, size);
  _number_of_blobs++;
  return cb;
}

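// Deallocate the given CodeBlob from its CodeHeap and update the blob counters.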
void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  // Get heap for given CodeBlob and deallocate
  get_code_heap(cb)->deallocate(cb);

  assert(_number_of_blobs >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  FOR_ALL_HEAPS(heap) {
    if ((*heap)->contains(p)) {
      return true;
    }
  }
  return false;
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  // We could potentially look up non_entrant methods
  guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Lookup that does not fail if you look up a zombie method (if you call this, be sure you know
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
  // NMT can walk the stack before code cache is created
  if (_heaps == NULL || _heaps->is_empty()) return NULL;

  FOR_ALL_HEAPS(heap) {
    CodeBlob* result = (CodeBlob*) (*heap)->find_start(start);
    if (result != NULL && result->blob_contains((address)start)) {
      return result;
    }
  }
  return NULL;
}

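// Like find_blob(), but asserts that the blob found is an nmethod.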
nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      f(cb);
    }
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next()) {
    f(iter.method());
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    f(iter.method());
  }
}

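// All CodeHeaps are created with the same segment size (see add_heap()), so
// querying the first heap is sufficient.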
int CodeCache::alignment_unit() {
  return (int)_heaps->first()->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heaps->first()->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        f->do_code_blob(cb);

#ifdef ASSERT
        if (cb->is_nmethod())
          ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
      }
    }
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root");
      tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(nm->scavenge_root_not_marked(), "clean state");
    if (nm->on_scavenge_root_list())
      nm->set_scavenge_root_marked();
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    bool call_f = (f_or_null != NULL);
    assert(nm->scavenge_root_not_marked(), "must be already processed");
    if (nm->on_scavenge_root_list())
      call_f = false;  // don't show this one to the client
    nm->verify_scavenge_root_oops();
    if (call_f) f_or_null->do_code_blob(nm);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    nm->verify_clean_inline_caches();
    nm->verify();
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;
        count += nm->verify_icholder_relocations();
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    assert(!nm->is_unloaded(), "Tautology");
    if (needs_cache_clean()) {
      nm->cleanup_inline_caches();
    }
    DEBUG_ONLY(nm->verify());
    DEBUG_ONLY(nm->verify_oop_relocations());
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    nm->oops_do(&voc);
    nm->verify_oop_relocations();
  }
}

size_t CodeCache::capacity() {
  size_t cap = 0;
  FOR_ALL_HEAPS(heap) {
    cap += (*heap)->capacity();
  }
  return cap;
}

size_t CodeCache::unallocated_capacity(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  return (heap != NULL) ? heap->unallocated_capacity() : 0;
}

size_t CodeCache::unallocated_capacity() {
  size_t unallocated_cap = 0;
  FOR_ALL_HEAPS(heap) {
    unallocated_cap += (*heap)->unallocated_capacity();
  }
  return unallocated_cap;
}

size_t CodeCache::max_capacity() {
  size_t max_cap = 0;
  FOR_ALL_HEAPS(heap) {
    max_cap += (*heap)->max_capacity();
  }
  return max_cap;
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
 * is free, reverse_free_ratio() returns 4.
 */
double CodeCache::reverse_free_ratio(int code_blob_type) {
  CodeHeap* heap = get_code_heap(code_blob_type);
  if (heap == NULL) {
    return 0;
  }

  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0
  double max_capacity = (double)heap->max_capacity();
  double result = max_capacity / unallocated_capacity;
  assert(max_capacity >= unallocated_capacity, "Must be");
  assert(result >= 1.0, err_msg_res("reverse_free_ratio must be at least 1. It is %f", result));
  return result;
}

size_t CodeCache::bytes_allocated_in_freelists() {
  size_t allocated_bytes = 0;
  FOR_ALL_HEAPS(heap) {
    allocated_bytes += (*heap)->allocated_in_freelist();
  }
  return allocated_bytes;
}

int CodeCache::allocated_segments() {
  int number_of_segments = 0;
  FOR_ALL_HEAPS(heap) {
    number_of_segments += (*heap)->allocated_segments();
  }
  return number_of_segments;
}

size_t CodeCache::freelists_length() {
  size_t length = 0;
  FOR_ALL_HEAPS(heap) {
    length += (*heap)->freelist_length();
  }
  return length;
}

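// icache_init() is provided by the ICache implementation (see runtime/icache.*);
// it is forward-declared here so initialize() can set up the I-cache flush stub.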
void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that caused a failure; instead,
  // round the expansion size up to the page size. In particular, Solaris is
  // moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());

  if (SegmentedCodeCache) {
    // Use multiple code heaps
    initialize_heaps();
  } else {
    // Use a single code heap
    ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
    add_heap(rs, "CodeCache", CodeBlobType::All);
  }

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area((char*)low_bound(), (char*)high_bound());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    iter.method()->clear_inline_caches();
  }
}

// Keeps track of time spent for checking dependencies
NOT_PRODUCT(static elapsedTimer dependentCheckTime;)

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.
  // nmethod::check_all_dependencies only works correctly if no safepoint
  // can happen
  No_Safepoint_Verifier nsv;
  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
    Klass* d = str.klass();
    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
  }

#ifndef PRODUCT
  if (VerifyDependencies) {
    // Object pointers are used as unique identifiers for dependency arguments. This
    // is only possible if no safepoint, i.e., GC occurs during the verification code.
    dependentCheckTime.start();
    nmethod::check_all_dependencies(changes);
    dependentCheckTime.stop();
  }
#endif

  return number_of_marked_CodeBlobs;
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

// Flushes compiled methods dependent on dependee.
void CodeCache::flush_dependents_on(instanceKlassHandle dependee) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

// Flushes compiled methods dependent on a particular CallSite
// instance when its target is different from the given MethodHandle.
void CodeCache::flush_dependents_on(Handle call_site, Handle method_handle) {
  assert_lock_strong(Compile_lock);

  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  CallSiteDepChange changes(call_site(), method_handle());

  // Compute the dependent nmethods that have a reference to a
  // CallSite object. We use InstanceKlass::mark_dependent_nmethod
  // directly instead of CodeCache::mark_for_deoptimization because we
  // want dependents on the call site class only, not all classes in
  // the ContextStream.
  int marked = 0;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
    marked = call_site_klass->mark_dependent_nmethods(changes);
  }
  if (marked > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void CodeCache::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_evol_deoptimization(ev_k_h) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void CodeCache::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    make_marked_nmethods_not_entrant();
  }
}

void CodeCache::verify() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_HEAPS(heap) {
    (*heap)->verify();
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_alive()) {
        cb->verify();
      }
    }
  }
}

// A CodeHeap is full. Print out warning and report event.
void CodeCache::report_codemem_full(int code_blob_type, bool print) {
  // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event
  CodeHeap* heap = get_code_heap(code_blob_type);
  assert(heap != NULL, "heap is null");

  if (!heap->was_full() || print) {
    // Not yet reported for this heap, report
    heap->report_full();
    if (SegmentedCodeCache) {
      warning("%s is full. Compiler has been disabled.", get_code_heap_name(code_blob_type));
      warning("Try increasing the code heap size using -XX:%s=", get_code_heap_flag_name(code_blob_type));
    } else {
      warning("CodeCache is full. Compiler has been disabled.");
      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
    }
    ResourceMark rm;
    stringStream s;
    // Dump code cache into a buffer before locking the tty
    {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      print_summary(&s);
    }
    ttyLocker ttyl;
    tty->print("%s", s.as_string());
  }

  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_codeBlobType((u1)code_blob_type);
    event.set_startAddress((u8)heap->low_boundary());
    event.set_commitedTopAddress((u8)heap->high());
    event.set_reservedTopAddress((u8)heap->high_boundary());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(heap->unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

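// Report memory lost to overhead: per-blob padding up to the next
// CodeCacheSegmentSize boundary, bytes parked in the freelists, and the
// segment map itself (one byte per segment).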
void CodeCache::print_memory_overhead() {
  size_t wasted_bytes = 0;
  FOR_ALL_HEAPS(heap) {
    CodeHeap* curr_heap = *heap;
    for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) {
      HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
      wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
    }
  }
  // Print bytes that are allocated in the freelist
  ttyLocker ttl;
  tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length());
  tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB", bytes_allocated_in_freelists()/K);
  tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB", (wasted_bytes/K));
  tty->print_cr("Segment map size: " SSIZE_FORMAT "kB", allocated_segments()/K); // 1 byte per segment
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  FOR_ALL_HEAPS(heap) {
    if (SegmentedCodeCache && Verbose) {
      tty->print_cr("-- %s --", (*heap)->name());
    }
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        if (Verbose && nm->method() != NULL) {
          ResourceMark rm;
          char *method_name = nm->method()->name_and_sig_as_C_string();
          tty->print("%s", method_name);
          if (nm->is_alive()) { tty->print_cr(" alive"); }
          if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
          if (nm->is_zombie()) { tty->print_cr(" zombie"); }
        }

        nmethodCount++;

        if (nm->is_alive()) { nmethodAlive++; }
        if (nm->is_not_entrant()) { nmethodNotEntrant++; }
        if (nm->is_zombie()) { nmethodZombie++; }
        if (nm->is_unloaded()) { nmethodUnloaded++; }
        if (nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }

        if (nm->method() != NULL && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

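  // Build a histogram of nmethod sizes using 512-byte buckets.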
  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter;
  while (iter.next()) {
    nmethod* nm = iter.method();
    if (nm->method() != NULL && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (!cb->is_alive()) {
        dead.add(cb);
      } else {
        live.add(cb);
      }
    }
  }

  tty->print_cr("CodeCache:");
  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        if (cb->is_alive()) {
          number_of_blobs++;
          code_size += cb->code_size();
          OopMapSet* set = cb->oop_maps();
          if (set != NULL) {
            number_of_oop_maps += set->size();
            map_size += set->heap_size();
          }
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (SegmentedCodeCache) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
                 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
                 total/K, (total - heap->unallocated_capacity())/K,
                 heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));
    }
  }

  if (detailed) {
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 nof_blobs(), nof_nmethods(), nof_adapters());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::print_codelist(outputStream* st) {
  assert_locked_or_safepoint(CodeCache_lock);

  NMethodIterator iter;
  while (iter.next_alive()) {
    nmethod* nm = iter.method();
    ResourceMark rm;
    char *method_name = nm->method()->name_and_sig_as_C_string();
    st->print_cr("%d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 nm->compile_id(), nm->comp_level(), method_name, (intptr_t)nm->header_begin(),
                 (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  assert_locked_or_safepoint(CodeCache_lock);
  ResourceMark rm;

  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity());
}
--- EOF ---