// NOTE(review): this chunk is the left-hand (old) column of a side-by-side
// comparison; the same functions reappear, with changes, after the '|'
// separator further down in the file.  The original file's line numbers
// (57-266 here) were embedded in the extracted text as artifacts and have
// been dropped while re-formatting; no code tokens were changed.

// Print one statistics line for this bucket: blob count, total footprint in
// KB, and each section's share of the total as a percentage.
void print(const char* title) {
  // NOTE(review): divides by total_size; presumably print() is only called
  // after at least one add() (total_size > 0), otherwise this divides by
  // zero -- confirm at the call sites.
  tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, data %d%%, pcs %d%%])",
                count,
                title,
                total() / K,
                header_size      * 100 / total_size,
                relocation_size  * 100 / total_size,
                code_size        * 100 / total_size,
                stub_size        * 100 / total_size,
                scopes_oop_size  * 100 / total_size,
                scopes_data_size * 100 / total_size,
                scopes_pcs_size  * 100 / total_size);
}

// Accumulate the section sizes of one CodeBlob into this bucket.  Sections
// that only exist for nmethods (code/stub split, scopes data, pc descs) are
// read through the nmethod; any other blob contributes its whole
// instruction area as "code".
void add(CodeBlob* cb) {
  count++;
  total_size      += cb->size();
  header_size     += cb->header_size();
  relocation_size += cb->relocation_size();
  // Oops size is taken from the blob for every blob kind here (the revised
  // column of this file moves this accounting into the nmethod-only branch
  // below).
  scopes_oop_size += cb->oops_size();
  if (cb->is_nmethod()) {
    nmethod *nm = (nmethod*)cb;
    code_size += nm->code_size();
    stub_size += nm->stub_size();

    scopes_data_size += nm->scopes_data_size();
    scopes_pcs_size  += nm->scopes_pcs_size();
  } else {
    code_size += cb->instructions_size();
  }
}
};  // end of enclosing stats holder (its opening brace is outside this excerpt)


// CodeCache implementation

// Static CodeCache state: the single backing CodeHeap plus bookkeeping
// counters and the heads of the scavenge-root and saved-nmethod lists.
CodeHeap * CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;


CodeBlob* CodeCache::first() {
// NOTE(review): body truncated -- original lines 103-244 are not part of
// this excerpt.


// Apply f to every nmethod in the code cache; non-nmethod blobs are skipped.
void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}


// Alignment unit of the underlying code heap, narrowed to int.
int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


// Alignment offset of the underlying code heap, narrowed to int.
int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark code blobs for unloading if they contain otherwise
// unreachable oops.
// Iterate over every alive blob and let each one decide whether it must be
// unloaded because it holds otherwise-unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive,
                             OopClosure* keep_alive,
                             bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    cb->do_unloading(is_alive, keep_alive, unloading_occurred);
  }
}

// Apply the closure to every alive blob; debug builds additionally verify
// each nmethod's scavenge-root oops while visiting it.
void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());
// NOTE(review): truncated -- original lines 293-491 are not part of this
// excerpt; the tokens below are the tail of a different function, which
// clears the method's code pointer and flags the nmethod as speculatively
// disconnected.
  }
  nm->method()->clear_code();
  nm->set_speculatively_disconnected(true);
}


// GC prologue: only asserts that a previous oops_do marking cycle was
// properly closed with oops_do_marking_epilogue.
void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}


// GC epilogue: clean inline caches when a cache clean was requested, verify
// nmethods in debug builds, and fix oop relocations in alive blobs.
void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      debug_only(nm->verify();)
    }
    // Performed for every blob kind in this column (the revised column
    // restricts it to nmethods).
    cb->fix_oop_relocations();
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
}


// Lowest address of the code cache's heap region.
address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->begin();
}


// One-past-the-end address of the code cache's heap region.
address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->end();
}


void icache_init();

// ===========================================================================
// NOTE(review): '|' column separator in the original side-by-side
// extraction.  Everything below repeats the functions above (original lines
// 57-266) in their revised, right-hand-column form.
// ===========================================================================

// Revised print(): textually identical to the left-hand-column version.
void print(const char* title) {
  // NOTE(review): same caveat as the left column -- divides by total_size,
  // so at least one blob must have been add()'ed first; confirm at call
  // sites.
  tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, data %d%%, pcs %d%%])",
                count,
                title,
                total() / K,
                header_size      * 100 / total_size,
                relocation_size  * 100 / total_size,
                code_size        * 100 / total_size,
                stub_size        * 100 / total_size,
                scopes_oop_size  * 100 / total_size,
                scopes_data_size * 100 / total_size,
                scopes_pcs_size  * 100 / total_size);
}

// Revised add(): the oops section is now accumulated only for nmethods
// (via nm->oops_size()), and the nmethod is obtained through
// as_nmethod_or_null() instead of a raw cast.
void add(CodeBlob* cb) {
  count++;
  total_size      += cb->size();
  header_size     += cb->header_size();
  relocation_size += cb->relocation_size();
  if (cb->is_nmethod()) {
    // Non-NULL here: guarded by the is_nmethod() check above.
    nmethod* nm = cb->as_nmethod_or_null();
    code_size += nm->code_size();
    stub_size += nm->stub_size();

    scopes_oop_size  += nm->oops_size();
    scopes_data_size += nm->scopes_data_size();
    scopes_pcs_size  += nm->scopes_pcs_size();
  } else {
    code_size += cb->instructions_size();
  }
}
};  // end of enclosing stats holder (its opening brace is outside this excerpt)


// CodeCache implementation

// Static CodeCache state (unchanged from the left column).
CodeHeap * CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;


CodeBlob* CodeCache::first() {
// NOTE(review): body truncated -- original lines 103-244 are not part of
// this excerpt.


// Apply f to every nmethod in the code cache; non-nmethod blobs are skipped.
void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}


// Alignment unit of the underlying code heap, narrowed to int.
int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


// Alignment offset of the underlying code heap, narrowed to int.
int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
// Revised do_unloading: iterates only alive nmethods via
// FOR_ALL_ALIVE_NMETHODS instead of all alive blobs -- presumably because
// only nmethods carry oops that participate in unloading (confirm against
// the CodeBlob hierarchy).
void CodeCache::do_unloading(BoolObjectClosure* is_alive,
                             OopClosure* keep_alive,
                             bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, keep_alive, unloading_occurred);
  }
}

// Apply the closure to every alive blob; debug builds additionally verify
// each nmethod's scavenge-root oops while visiting it.
void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());
// NOTE(review): truncated -- original lines 293-491 are not part of this
// excerpt; the tokens below are the tail of a different function, which
// clears the method's code pointer and flags the nmethod as speculatively
// disconnected.
  }
  nm->method()->clear_code();
  nm->set_speculatively_disconnected(true);
}


// GC prologue: only asserts that a previous oops_do marking cycle was
// properly closed with oops_do_marking_epilogue.
void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}


// Revised GC epilogue: oop-relocation fixing now happens only inside the
// nmethod branch (nm->fix_oop_relocations()); the left column applied
// cb->fix_oop_relocations() to every alive blob.  Also uses DEBUG_ONLY
// instead of debug_only for the verify call.
void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      nm->fix_oop_relocations();
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
}


// Lowest address of the code cache's heap region.
address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->begin();
}


// One-past-the-end address of the code cache's heap region.
address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->end();
}


void icache_init();