void NMethodSweeper::record_sweep(nmethod* nm, int line) {
  if (_records != NULL) {
    _records[_sweep_index].traversal = _traversals;
    _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
    _records[_sweep_index].invocation = _invocations;
    _records[_sweep_index].compile_id = nm->compile_id();
    _records[_sweep_index].kind = nm->compile_kind();
    _records[_sweep_index].state = nm->_state;
    _records[_sweep_index].vep = nm->verified_entry_point();
    _records[_sweep_index].uep = nm->entry_point();
    _records[_sweep_index].line = line;

    _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
  }
}
#else
#define SWEEP(nm)
#endif
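// A minimal sketch (an assumption, not shown in this excerpt) of how the debug
// (ASSERT) build presumably defines the SWEEP probe, so that record_sweep()
// captures the source line of each probe point:
//
//   #define SWEEP(nm) record_sweep(nm, __LINE__)
//
// In product builds the macro expands to nothing, as defined above.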
long      NMethodSweeper::_traversals = 0;      // Number of stack traversals performed
nmethod*  NMethodSweeper::_current = NULL;      // Current nmethod
int       NMethodSweeper::_seen = 0;            // Number of nmethods we have currently processed in current pass of CodeCache
int       NMethodSweeper::_flushed_count = 0;   // Number of nmethods flushed in current sweep
int       NMethodSweeper::_zombified_count = 0; // Number of nmethods made zombie in current sweep
int       NMethodSweeper::_marked_count = 0;    // Number of nmethods marked for reclaim in current sweep

volatile int NMethodSweeper::_invocations = 0;   // Number of invocations left until we are completed with this pass
volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.

jint      NMethodSweeper::_locked_seen = 0;
jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
bool      NMethodSweeper::_resweep = false;
jint      NMethodSweeper::_flush_token = 0;
jlong     NMethodSweeper::_last_full_flush_time = 0;
int       NMethodSweeper::_highest_marked = 0;
int       NMethodSweeper::_dead_compile_ids = 0;
long      NMethodSweeper::_last_flush_traversal_id = 0;

int       NMethodSweeper::_number_of_flushes = 0; // Total number of full traversals caused by a full code cache
int       NMethodSweeper::_total_nof_methods_reclaimed = 0;
jlong     NMethodSweeper::_total_time_sweeping = 0;
jlong     NMethodSweeper::_total_time_this_sweep = 0;
jlong     NMethodSweeper::_peak_sweep_time = 0;
jlong     NMethodSweeper::_peak_sweep_fraction_time = 0;
jlong     NMethodSweeper::_total_disconnect_time = 0;
jlong     NMethodSweeper::_peak_disconnect_time = 0;

class MarkActivationClosure: public CodeBlobClosure {
 public:
  virtual void do_code_blob(CodeBlob* cb) {
    // If we see an activation belonging to a non_entrant nmethod, we mark it.
    if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
      ((nmethod*)cb)->mark_as_seen_on_stack();
    }
  }
};
static MarkActivationClosure mark_activation_closure;

bool NMethodSweeper::sweep_in_progress() {
  return (_current != NULL);
}

void NMethodSweeper::scan_stacks() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
  if (!MethodFlushing) return;

  // No need to synchronize access, since this is always executed at a
  // safepoint.

  // Make sure CompiledIC_lock is unlocked, since we might update some
  // inline caches. If it is locked, we just bail out and try later.
  if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;

  // Check for restart
  assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
  if (!sweep_in_progress() && _resweep) {
    _seen = 0;
    _invocations = NmethodSweepFraction;
    _current = CodeCache::first_nmethod();
    _traversals += 1;
    _total_time_this_sweep = 0;

    if (PrintMethodFlushing) {
      tty->print_cr("### Sweep: stack traversal %d", _traversals);
    }
    Threads::nmethods_do(&mark_activation_closure);

    // reset the flags since we started a scan from the beginning.
    _resweep = false;
    _locked_seen = 0;
    _not_entrant_seen_on_stack = 0;
  }

  if (UseCodeCacheFlushing) {
    // only allow new flushes after the interval is complete.
    jlong now           = os::javaTimeMillis();
    jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
    jlong curr_interval = now - _last_full_flush_time;
    if (curr_interval > max_interval) {
      _flush_token = 0;
    }

    if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
      log_sweep("restart_compiler");
    }
  }
}

void NMethodSweeper::possibly_sweep() {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
  if (!MethodFlushing || !sweep_in_progress()) return;

  if (_invocations > 0) {
    // Only one thread at a time will sweep
    jint old = Atomic::cmpxchg(1, &_sweep_started, 0);
    if (old != 0) {
      return;
    }
#ifdef ASSERT
    if (LogSweeper && _records == NULL) {
      // Create the ring buffer for the logging code
      _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
      memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
    }
#endif
    if (_invocations > 0) {
      sweep_code_cache();
      _invocations--;
    }
    _sweep_started = 0;
  }
}
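// Illustrative sketch (not part of this file) of the claim/release protocol
// used by possibly_sweep() above: Atomic::cmpxchg(1, &_sweep_started, 0)
// returns the previous value, so exactly one of the contending compiler
// threads observes 0 and wins the right to sweep. The names below are
// hypothetical, and std::atomic stands in for the VM's Atomic class to keep
// the sketch self-contained.
#include <atomic>

namespace sweep_gate_sketch {
  std::atomic<int> sweep_started(0);

  void possibly_do_work() {
    int expected = 0;
    // Plays the role of Atomic::cmpxchg(1, &_sweep_started, 0): succeeds for
    // exactly one thread; everyone else sees a non-zero previous value.
    if (!sweep_started.compare_exchange_strong(expected, 1)) {
      return;  // another thread is already sweeping
    }
    // ... sweep one fraction of the code cache ...
    sweep_started.store(0);  // release the gate for the next invocation
  }
}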
void NMethodSweeper::sweep_code_cache() {

  jlong sweep_start_counter = os::elapsed_counter();

  _flushed_count   = 0;
  _zombified_count = 0;
  _marked_count    = 0;

  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
  }

  if (!CompileBroker::should_compile_new_jobs()) {
    // If we have turned off compilations we might as well do full sweeps
    // in order to reach the clean state faster. Otherwise the sleeping compiler
    // threads will slow down sweeping. After a few iterations the cache
    // will be clean and sweeping stops (_resweep will not be set)
    _invocations = 1;
  }

  // We want to visit all nmethods after NmethodSweepFraction
  // invocations so divide the remaining number of nmethods by the
  // remaining number of invocations. This is only an estimate since
  // the number of nmethods changes during the sweep so the final
  // stage must iterate until there are no more nmethods.
  int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;

  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    // The last invocation iterates until there are no more nmethods
    for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
      if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
        }
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

        assert(Thread::current()->is_Java_thread(), "should be java thread");
        JavaThread* thread = (JavaThread*)Thread::current();
        ThreadBlockInVM tbivm(thread);
        thread->java_suspend_self();
      }
      // Since we will give up the CodeCache_lock, always skip ahead
      // to the next nmethod. Other blobs can be deleted by other
      // threads but nmethods are only reclaimed by the sweeper.
      nmethod* next = CodeCache::next_nmethod(_current);

      // Now ready to process nmethod and give up CodeCache_lock
      {
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        process_nmethod(_current);
      }
      _seen++;
      _current = next;
    }
  }

  assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");

  if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
    // we've completed a scan without making progress but there were
    // nmethods we were unable to process either because they were
    // locked or were still on stack. We don't have to aggressively
    // clean them up so just stop scanning. We could scan once more
    // but that complicates the control logic and it's unlikely to
    // matter much.
    if (PrintMethodFlushing) {
      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
    }
  }

  jlong sweep_end_counter = os::elapsed_counter();
  jlong sweep_time = sweep_end_counter - sweep_start_counter;
  _total_time_sweeping   += sweep_time;
  _total_time_this_sweep += sweep_time;
  _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
  _total_nof_methods_reclaimed += _flushed_count;

  EventSweepCodeCache event(UNTIMED);
  if (event.should_commit()) {
    event.set_starttime(sweep_start_counter);
    event.set_endtime(sweep_end_counter);
    event.set_sweepIndex(_traversals);
    event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
    event.set_sweptCount(todo);
    event.set_flushedCount(_flushed_count);
    event.set_markedCount(_marked_count);
    event.set_zombifiedCount(_zombified_count);
    event.commit();
  }

#ifdef ASSERT
  if (PrintMethodFlushing) {
    tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time);
  }
#endif

  if (_invocations == 1) {
    _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
    log_sweep("finished");
  }

  // Sweeper is the only case where memory is released,
  // check here if it is time to restart the compiler.
  if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) {
    CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
    log_sweep("restart_compiler");
  }
}
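// Worked example (hypothetical numbers, not taken from this file) of the
// per-invocation partitioning computed in sweep_code_cache() above: with a
// fresh pass over 3200 nmethods and all sweep-fraction invocations still
// remaining, each invocation visits roughly an equal share of the cache; the
// final invocation (_invocations == 1) keeps iterating until _current == NULL
// to absorb any drift in the estimate.
namespace sweep_fraction_sketch {
  const int nof_nmethods = 3200; // hypothetical code cache population
  const int seen         = 0;    // nothing processed yet in this pass
  const int invocations  = 16;   // assumed NmethodSweepFraction at pass start
  const int todo_first   = (nof_nmethods - seen) / invocations; // == 200 nmethods
}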
class NMethodMarker: public StackObj {
 private:
  CompilerThread* _thread;
 public:
  NMethodMarker(nmethod* nm) {
    _thread = CompilerThread::current();
    if (!nm->is_zombie() && !nm->is_unloaded()) {
      // Only expose live nmethods for scanning
      _thread->set_scanned_nmethod(nm);
    }
  }
  ~NMethodMarker() {
    _thread->set_scanned_nmethod(NULL);
  }
};

void NMethodSweeper::release_nmethod(nmethod *nm) {
  // Clean up any CompiledICHolders
  {
    ResourceMark rm;
    MutexLocker ml_patch(CompiledIC_lock);
    RelocIterator iter(nm);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
      }
    }
  }

  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nm->flush();
}

void NMethodSweeper::process_nmethod(nmethod *nm) {
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  // Make sure this nmethod doesn't get unloaded during the scan,
  // since the locks acquired below might safepoint.
  NMethodMarker nmm(nm);

  SWEEP(nm);

  // Skip methods that are currently referenced by the VM
  if (nm->is_locked_by_vm()) {
    // But still remember to clean up inline caches for alive nmethods
    if (nm->is_alive()) {
      // Clean up all inline caches that point to zombie/non-entrant methods
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      SWEEP(nm);
    } else {
      _locked_seen++;
      SWEEP(nm);
    }
    return;
  }

  if (nm->is_zombie()) {
    // If it is the first time we see this nmethod, we mark it. Otherwise,
    // we reclaim it. When we have seen a zombie method twice, we know that
    // there are no inline caches that refer to it.
    if (nm->is_marked_for_reclamation()) {
      assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
      }
      release_nmethod(nm);
      _flushed_count++;
    } else {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
      }
      nm->mark_for_reclamation();
      _resweep = true;
      _marked_count++;
      SWEEP(nm);
    }
  } else if (nm->is_not_entrant()) {
    // If there are no current activations of this method on the
    // stack we can safely convert it to a zombie method
    if (nm->can_not_entrant_be_converted()) {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
      }
      nm->make_zombie();
      _resweep = true;
      _zombified_count++;
      SWEEP(nm);
    } else {
      // Still alive, clean up its inline caches
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      // we couldn't transition this nmethod so don't immediately
      // request a rescan. If this method stays on the stack for a
      // long time we don't want to keep rescanning the code cache.
      _not_entrant_seen_on_stack++;
      SWEEP(nm);
    }
  } else if (nm->is_unloaded()) {
    // Unloaded code, just make it a zombie
    if (PrintMethodFlushing && Verbose) {
      tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
    }
    if (nm->is_osr_method()) {
      SWEEP(nm);
      // No inline caches will ever point to osr methods, so we can just remove it
      release_nmethod(nm);
      _flushed_count++;
    } else {
      nm->make_zombie();
      _resweep = true;
      _zombified_count++;
      SWEEP(nm);
    }
  } else {
    assert(nm->is_alive(), "should be alive");

    if (UseCodeCacheFlushing) {
      if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
          (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
        // This method has not been called since the forced cleanup happened
        nm->make_not_entrant();
      }
    }

    // Clean up all inline caches that point to zombie/non-entrant methods
    MutexLocker cl(CompiledIC_lock);
    nm->cleanup_inline_caches();
    SWEEP(nm);
  }
}

// Code cache unloading: when compilers notice the code cache is getting full,
// they will call a vm op that comes here. This code attempts to speculatively
// unload the oldest half of the nmethods (based on the compile job id) by
// saving the old code in a list in the CodeCache. Then execution resumes.
// If a method so marked is not called by the second sweeper stack traversal
// after the current one, the nmethod will be made not-entrant and removed by
// normal sweeping. If the method is called, the Method*'s _code field is
// restored and the Method*/nmethod go back to their normal state.
void NMethodSweeper::handle_full_code_cache(bool is_full) {

  if (is_full) {
    // Since code cache is full, immediately stop new compiles
    if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
      log_sweep("disable_compiler");
    }
  }

  // Make sure only one thread can flush
  // The token is reset after MinCodeCacheFlushingInterval in scan_stacks,
  // so there is no need to check the timeout here.
  jint old = Atomic::cmpxchg(1, &_flush_token, 0);
  if (old != 0) {
    return;
  }

  VM_HandleFullCodeCache op(is_full);
  VMThread::execute(&op);

  // resweep again as soon as possible
  _resweep = true;
}
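// Worked example (hypothetical numbers, not taken from this file) for the
// flush target computed in speculative_disconnect_nmethods() below: compile
// ids below the target, i.e. roughly the older portion of the still-live
// compile ids, are considered for disconnection.
namespace flush_target_sketch {
  const int curr_max_comp_id  = 10000; // hypothetical newest compile id
  const int dead_compile_ids  = 2000;  // ids not seen during the last flush
  const int flushing_fraction = 2;     // assumed CodeCacheFlushingFraction
  const int flush_target =
      ((curr_max_comp_id - dead_compile_ids) / flushing_fraction) + dead_compile_ids; // == 6000
}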
void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
  // If there was a race in detecting full code cache, only run
  // one vm op for it or keep the compiler shut off

  jlong disconnect_start_counter = os::elapsed_counter();

  // Traverse the code cache trying to dump the oldest nmethods
  int curr_max_comp_id = CompileBroker::get_compilation_id();
  int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;

  log_sweep("start_cleaning");

  nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
  jint disconnected = 0;
  jint made_not_entrant = 0;
  jint nmethod_count = 0;

  while ((nm != NULL)) {
    int curr_comp_id = nm->compile_id();

    // OSR methods cannot be flushed like this. Also, don't flush native methods
    // since they are part of the JDK in most cases
    if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {

      // only count methods that can be speculatively disconnected
      nmethod_count++;

      if (nm->is_in_use() && (curr_comp_id < flush_target)) {
        if ((nm->method()->code() == nm)) {
          // This method has not been previously considered for
          // unloading or it was restored already
          CodeCache::speculatively_disconnect(nm);
          disconnected++;
        } else if (nm->is_speculatively_disconnected()) {
          // This method was previously considered for preemptive unloading and was not called since then
          CompilationPolicy::policy()->delay_compilation(nm->method());
          nm->make_not_entrant();
          made_not_entrant++;
        }

        if (curr_comp_id > _highest_marked) {
          _highest_marked = curr_comp_id;
        }
      }
    }
    nm = CodeCache::alive_nmethod(CodeCache::next(nm));
  }

  // remember how many compile_ids weren't seen during the last flush.
  _dead_compile_ids = curr_max_comp_id - nmethod_count;

  log_sweep("stop_cleaning",
            "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
            disconnected, made_not_entrant);

  // Shut off compiler. Sweeper will start over with a new stack scan and
  // traversal cycle and turn it back on if it clears enough space.
  if (is_full) {
    _last_full_flush_time = os::javaTimeMillis();
  }

  jlong disconnect_end_counter = os::elapsed_counter();
  jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
  _total_disconnect_time += disconnect_time;
  _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);

  EventCleanCodeCache event(UNTIMED);
  if (event.should_commit()) {
    event.set_starttime(disconnect_start_counter);
    event.set_endtime(disconnect_end_counter);
    event.set_disconnectedCount(disconnected);
    event.set_madeNonEntrantCount(made_not_entrant);
    event.commit();
  }
  _number_of_flushes++;

  // After two more traversals the sweeper will get rid of unrestored nmethods
  _last_flush_traversal_id = _traversals;
  _resweep = true;
#ifdef ASSERT
  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
  }
#endif
}


// Print out some state information about the current sweep and the
// state of the code cache if it's requested.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
  if (PrintMethodFlushing) {
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    tty->print("### sweeper: %s ", msg);
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      tty->vprint(format, ap);
      va_end(ap);
    }
    tty->print_cr(s.as_string());
  }
}

// ---------------------------------------------------------------------------
// Second revision of the same code (the right-hand side of the side-by-side
// diff): speculative disconnection is replaced by hotness-counter-based
// sweeping.
// ---------------------------------------------------------------------------

void NMethodSweeper::record_sweep(nmethod* nm, int line) {
  if (_records != NULL) {
    _records[_sweep_index].traversal = _traversals;
    _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
    _records[_sweep_index].invocation = _invocations;
    _records[_sweep_index].compile_id = nm->compile_id();
    _records[_sweep_index].kind = nm->compile_kind();
    _records[_sweep_index].state = nm->_state;
    _records[_sweep_index].vep = nm->verified_entry_point();
    _records[_sweep_index].uep = nm->entry_point();
    _records[_sweep_index].line = line;

    _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
  }
}
#else
#define SWEEP(nm)
#endif

nmethod*  NMethodSweeper::_current = NULL;      // Current nmethod
long      NMethodSweeper::_traversals = 0;      // Number of stack traversals performed
int       NMethodSweeper::_seen = 0;            // Number of nmethods we have currently processed in current pass of CodeCache
int       NMethodSweeper::_flushed_count = 0;   // Number of nmethods flushed in current sweep
int       NMethodSweeper::_zombified_count = 0; // Number of nmethods made zombie in current sweep
int       NMethodSweeper::_marked_count = 0;    // Number of nmethods marked for reclaim in current sweep

volatile int NMethodSweeper::_invocations = 0;   // Number of invocations left until we are completed with this pass
volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.

jint      NMethodSweeper::_locked_seen = 0;
jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
bool      NMethodSweeper::_request_mark_phase = false;

int       NMethodSweeper::_total_nof_methods_reclaimed = 0;
jlong     NMethodSweeper::_total_time_sweeping = 0;
jlong     NMethodSweeper::_total_time_this_sweep = 0;
jlong     NMethodSweeper::_peak_sweep_time = 0;
jlong     NMethodSweeper::_peak_sweep_fraction_time = 0;
int       NMethodSweeper::_hotness_counter_reset_val = 0;


class MarkActivationClosure: public CodeBlobClosure {
 public:
  virtual void do_code_blob(CodeBlob* cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
      // If we see an activation belonging to a non_entrant nmethod, we mark it.
      if (nm->is_not_entrant()) {
        nm->mark_as_seen_on_stack();
      }
    }
  }
};
static MarkActivationClosure mark_activation_closure;

class SetHotnessClosure: public CodeBlobClosure {
 public:
  virtual void do_code_blob(CodeBlob* cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
    }
  }
};
static SetHotnessClosure set_hotness_closure;


int NMethodSweeper::hotness_counter_reset_val() {
  if (_hotness_counter_reset_val == 0) {
    _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
  }
  return _hotness_counter_reset_val;
}

bool NMethodSweeper::sweep_in_progress() {
  return (_current != NULL);
}
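// Worked example (hypothetical sizes, not taken from this file) for
// hotness_counter_reset_val() above: the reset value scales with the size of
// the code cache, so nmethods in a larger cache survive more sweeps before
// their counter can decay below the removal threshold.
namespace reset_val_sketch {
  const long M_bytes  = 1024 * 1024;
  const long reserved = 240 * M_bytes; // assumed -XX:ReservedCodeCacheSize=240m
  const int  reset_val =
      (reserved < M_bytes) ? 1 : (int)(reserved / M_bytes) * 2; // == 480
}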
// Scans the stacks of all Java threads and marks activations of not-entrant methods.
// No need to synchronize access, since 'mark_active_nmethods' is always executed at a
// safepoint.
void NMethodSweeper::mark_active_nmethods() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
  // If we do not want to reclaim not-entrant or zombie methods there is no need
  // to scan stacks
  if (!MethodFlushing) {
    return;
  }

  // Check for restart
  assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
  if (!sweep_in_progress() && need_marking_phase()) {
    _seen = 0;
    _invocations = NmethodSweepFraction;
    _current = CodeCache::first_nmethod();
    _traversals += 1;
    _total_time_this_sweep = 0;

    if (PrintMethodFlushing) {
      tty->print_cr("### Sweep: stack traversal %d", _traversals);
    }
    Threads::nmethods_do(&mark_activation_closure);

    // reset the flags since we started a scan from the beginning.
    reset_nmethod_marking();
    _locked_seen = 0;
    _not_entrant_seen_on_stack = 0;
  } else {
    // Only set hotness counter
    Threads::nmethods_do(&set_hotness_closure);
  }
}

void NMethodSweeper::possibly_sweep() {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
  if (!MethodFlushing || !sweep_in_progress()) {
    return;
  }

  if (_invocations > 0) {
    // Only one thread at a time will sweep
    jint old = Atomic::cmpxchg(1, &_sweep_started, 0);
    if (old != 0) {
      return;
    }
#ifdef ASSERT
    if (LogSweeper && _records == NULL) {
      // Create the ring buffer for the logging code
      _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
      memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
    }
#endif
    if (_invocations > 0) {
      sweep_code_cache();
      _invocations--;
    }
    _sweep_started = 0;
  }
}
void NMethodSweeper::sweep_code_cache() {

  jlong sweep_start_counter = os::elapsed_counter();

  _flushed_count   = 0;
  _zombified_count = 0;
  _marked_count    = 0;

  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
  }

  if (!CompileBroker::should_compile_new_jobs()) {
    // If we have turned off compilations we might as well do full sweeps
    // in order to reach the clean state faster. Otherwise the sleeping compiler
    // threads will slow down sweeping.
    _invocations = 1;
  }

  // We want to visit all nmethods after NmethodSweepFraction
  // invocations so divide the remaining number of nmethods by the
  // remaining number of invocations. This is only an estimate since
  // the number of nmethods changes during the sweep so the final
  // stage must iterate until there are no more nmethods.
  int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  int freed_memory = 0;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    // The last invocation iterates until there are no more nmethods
    for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
      if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
        }
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

        assert(Thread::current()->is_Java_thread(), "should be java thread");
        JavaThread* thread = (JavaThread*)Thread::current();
        ThreadBlockInVM tbivm(thread);
        thread->java_suspend_self();
      }
      // Since we will give up the CodeCache_lock, always skip ahead
      // to the next nmethod. Other blobs can be deleted by other
      // threads but nmethods are only reclaimed by the sweeper.
      nmethod* next = CodeCache::next_nmethod(_current);

      // Now ready to process nmethod and give up CodeCache_lock
      {
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        freed_memory += process_nmethod(_current);
      }
      _seen++;
      _current = next;
    }
  }

  assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");

  if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
    // we've completed a scan without making progress but there were
    // nmethods we were unable to process either because they were
    // locked or were still on stack. We don't have to aggressively
    // clean them up so just stop scanning. We could scan once more
    // but that complicates the control logic and it's unlikely to
    // matter much.
    if (PrintMethodFlushing) {
      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
    }
  }

  jlong sweep_end_counter = os::elapsed_counter();
  jlong sweep_time = sweep_end_counter - sweep_start_counter;
  _total_time_sweeping   += sweep_time;
  _total_time_this_sweep += sweep_time;
  _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
  _total_nof_methods_reclaimed += _flushed_count;

  EventSweepCodeCache event(UNTIMED);
  if (event.should_commit()) {
    event.set_starttime(sweep_start_counter);
    event.set_endtime(sweep_end_counter);
    event.set_sweepIndex(_traversals);
    event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
    event.set_sweptCount(todo);
    event.set_flushedCount(_flushed_count);
    event.set_markedCount(_marked_count);
    event.set_zombifiedCount(_zombified_count);
    event.commit();
  }

#ifdef ASSERT
  if (PrintMethodFlushing) {
    tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time);
  }
#endif

  if (_invocations == 1) {
    _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
    log_sweep("finished");
  }

  // The sweeper is the only place where memory is released; check here if it
  // is time to restart the compiler. Only checking whether there is a certain
  // amount of free memory in the code cache might lead to re-enabling
  // compilation although no memory has been released. For example, there are
  // cases when compilation was disabled although there is 4MB (or more) free
  // memory in the code cache; the reason is code cache fragmentation. Therefore,
  // it only makes sense to re-enable compilation if we have actually freed memory.
  // Note that typically several kB are released for sweeping 16MB of the code
  // cache. As a result, 'freed_memory' must be > 0 to restart the compiler.
  if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0))) {
    CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
    log_sweep("restart_compiler");
  }
}
class NMethodMarker: public StackObj {
 private:
  CompilerThread* _thread;
 public:
  NMethodMarker(nmethod* nm) {
    _thread = CompilerThread::current();
    if (!nm->is_zombie() && !nm->is_unloaded()) {
      // Only expose live nmethods for scanning
      _thread->set_scanned_nmethod(nm);
    }
  }
  ~NMethodMarker() {
    _thread->set_scanned_nmethod(NULL);
  }
};

void NMethodSweeper::release_nmethod(nmethod *nm) {
  // Clean up any CompiledICHolders
  {
    ResourceMark rm;
    MutexLocker ml_patch(CompiledIC_lock);
    RelocIterator iter(nm);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
      }
    }
  }

  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nm->flush();
}

int NMethodSweeper::process_nmethod(nmethod *nm) {
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  int freed_memory = 0;
  // Make sure this nmethod doesn't get unloaded during the scan,
  // since the locks acquired below might safepoint.
  NMethodMarker nmm(nm);
  SWEEP(nm);

  // Skip methods that are currently referenced by the VM
  if (nm->is_locked_by_vm()) {
    // But still remember to clean up inline caches for alive nmethods
    if (nm->is_alive()) {
      // Clean inline caches that point to zombie/non-entrant methods
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      SWEEP(nm);
    } else {
      _locked_seen++;
      SWEEP(nm);
    }
    return freed_memory;
  }

  if (nm->is_zombie()) {
    // If it is the first time we see this nmethod, we mark it. Otherwise,
    // we reclaim it. When we have seen a zombie method twice, we know that
    // there are no inline caches that refer to it.
    if (nm->is_marked_for_reclamation()) {
      assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
      }
      freed_memory = nm->total_size();
      release_nmethod(nm);
      _flushed_count++;
    } else {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
      }
      nm->mark_for_reclamation();
      request_nmethod_marking();
      _marked_count++;
      SWEEP(nm);
    }
  } else if (nm->is_not_entrant()) {
    // If there are no current activations of this method on the
    // stack we can safely convert it to a zombie method
    if (nm->can_not_entrant_be_converted()) {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
      }
      nm->make_zombie();
      request_nmethod_marking();
      _zombified_count++;
      SWEEP(nm);
    } else {
      // Still alive, clean up its inline caches
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      // we couldn't transition this nmethod so don't immediately
      // request a rescan. If this method stays on the stack for a
      // long time we don't want to keep rescanning the code cache.
      _not_entrant_seen_on_stack++;
      SWEEP(nm);
    }
  } else if (nm->is_unloaded()) {
    // Unloaded code, just make it a zombie
    if (PrintMethodFlushing && Verbose) {
      tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
    }
    if (nm->is_osr_method()) {
      SWEEP(nm);
      // No inline caches will ever point to osr methods, so we can just remove it
      freed_memory = nm->total_size();
      release_nmethod(nm);
      _flushed_count++;
    } else {
      nm->make_zombie();
      request_nmethod_marking();
      _zombified_count++;
      SWEEP(nm);
    }
  } else {
    if (UseCodeCacheFlushing) {
      if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
        // Do not make native methods and OSR-methods not-entrant
        nm->dec_hotness_counter();
        // Get the initial value of the hotness counter. This value depends on the
        // ReservedCodeCacheSize
        int reset_val = hotness_counter_reset_val();
        int time_since_reset = reset_val - nm->hotness_counter();
        double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
        // The less free space in the code cache we have, the bigger reverse_free_ratio() is,
        // i.e., 'threshold' increases with lower available space in the code cache and a higher
        // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
        // value until it is reset by stack walking - is smaller than the computed threshold, the
        // corresponding nmethod is considered for removal.
        if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
          // A method is made not-entrant if the method is
          // 1) 'old enough': nm->hotness_counter() < threshold
          // 2) in use for a minimum amount of time: (time_since_reset > 10)
          // The second condition is necessary if we are dealing with very small code cache
          // sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
          // It ensures that methods are not immediately made not-entrant after compilation.
          nm->make_not_entrant();
          request_nmethod_marking();
        }
      }
    }
    // Clean up all inline caches that point to zombie/non-entrant methods
    MutexLocker cl(CompiledIC_lock);
    nm->cleanup_inline_caches();
    SWEEP(nm);
  }
  return freed_memory;
}
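// Worked example (hypothetical values, not taken from this file) for the
// not-entrant threshold computed in process_nmethod() above: with a reset
// value of 480, an assumed NmethodSweepActivity of 10, and a code cache that
// is three quarters full (reverse_free_ratio() == 4), an nmethod is made
// not-entrant once its hotness counter has decayed below -440, i.e. after it
// has gone unseen on any stack for many consecutive sweeps. A fuller cache
// raises the threshold and therefore removes cold methods sooner.
namespace threshold_sketch {
  const int    reset_val      = 480;  // hotness_counter_reset_val() for an assumed 240 MB cache
  const double reverse_free   = 4.0;  // assumed CodeCache::reverse_free_ratio()
  const int    sweep_activity = 10;   // assumed NmethodSweepActivity
  const double threshold      = -reset_val + (reverse_free * sweep_activity); // == -440.0
}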
// Print out some state information about the current sweep and the
// state of the code cache if it's requested.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
  if (PrintMethodFlushing) {
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    tty->print("### sweeper: %s ", msg);
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      tty->vprint(format, ap);
      va_end(ap);
    }
    tty->print_cr(s.as_string());
  }