/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileBroker.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "runtime/atomic.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vm_operations.hpp"
#include "trace/tracing.hpp"
#include "utilities/events.hpp"
#include "utilities/ticks.inline.hpp"
#include "utilities/xmlstream.hpp"

#ifdef ASSERT

#define SWEEP(nm) record_sweep(nm, __LINE__)
// Sweeper logging code
class SweeperRecord {
 public:
  int traversal;
  int compile_id;
  long traversal_mark;
  int state;
  const char* kind;
  address vep;
  address uep;
  int line;

  void print() {
      tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
                    PTR_FORMAT " state = %d traversal_mark %ld line = %d",
                    traversal,
                    compile_id,
                    kind == NULL ? "" : kind,
"" : kind, 69 p2i(uep), 70 p2i(vep), 71 state, 72 traversal_mark, 73 line); 74 } 75 }; 76 77 static int _sweep_index = 0; 78 static SweeperRecord* _records = NULL; 79 80 void NMethodSweeper::report_events(int id, address entry) { 81 if (_records != NULL) { 82 for (int i = _sweep_index; i < SweeperLogEntries; i++) { 83 if (_records[i].uep == entry || 84 _records[i].vep == entry || 85 _records[i].compile_id == id) { 86 _records[i].print(); 87 } 88 } 89 for (int i = 0; i < _sweep_index; i++) { 90 if (_records[i].uep == entry || 91 _records[i].vep == entry || 92 _records[i].compile_id == id) { 93 _records[i].print(); 94 } 95 } 96 } 97 } 98 99 void NMethodSweeper::report_events() { 100 if (_records != NULL) { 101 for (int i = _sweep_index; i < SweeperLogEntries; i++) { 102 // skip empty records 103 if (_records[i].vep == NULL) continue; 104 _records[i].print(); 105 } 106 for (int i = 0; i < _sweep_index; i++) { 107 // skip empty records 108 if (_records[i].vep == NULL) continue; 109 _records[i].print(); 110 } 111 } 112 } 113 114 void NMethodSweeper::record_sweep(CompiledMethod* nm, int line) { 115 if (_records != NULL) { 116 _records[_sweep_index].traversal = _traversals; 117 _records[_sweep_index].traversal_mark = nm->is_nmethod() ? ((nmethod*)nm)->stack_traversal_mark() : 0; 118 _records[_sweep_index].compile_id = nm->compile_id(); 119 _records[_sweep_index].kind = nm->compile_kind(); 120 _records[_sweep_index].state = nm->get_state(); 121 _records[_sweep_index].vep = nm->verified_entry_point(); 122 _records[_sweep_index].uep = nm->entry_point(); 123 _records[_sweep_index].line = line; 124 _sweep_index = (_sweep_index + 1) % SweeperLogEntries; 125 } 126 } 127 128 void NMethodSweeper::init_sweeper_log() { 129 if (LogSweeper && _records == NULL) { 130 // Create the ring buffer for the logging code 131 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC); 132 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries); 133 } 134 } 135 #else 136 #define SWEEP(nm) 137 #endif 138 139 CompiledMethodIterator NMethodSweeper::_current; // Current compiled method 140 long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID. 141 long NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache 142 long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper 143 long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened 144 int NMethodSweeper::_seen = 0; // Nof. 

volatile bool NMethodSweeper::_should_sweep       = true;      // Indicates if we should invoke the sweeper
volatile bool NMethodSweeper::_force_sweep        = false;     // Indicates if we should force a sweep
volatile int  NMethodSweeper::_bytes_changed      = 0;         // Counts the total nmethod size if the nmethod changed from:
                                                               //   1) alive       -> not_entrant
                                                               //   2) not_entrant -> zombie
int  NMethodSweeper::_hotness_counter_reset_val   = 0;

long NMethodSweeper::_total_nof_methods_reclaimed    = 0;      // Accumulated nof methods flushed
long NMethodSweeper::_total_nof_c2_methods_reclaimed = 0;      // Accumulated nof methods flushed
size_t NMethodSweeper::_total_flushed_size           = 0;      // Total number of bytes flushed from the code cache
Tickspan NMethodSweeper::_total_time_sweeping;                 // Accumulated time sweeping
Tickspan NMethodSweeper::_total_time_this_sweep;               // Total time this sweep
Tickspan NMethodSweeper::_peak_sweep_time;                     // Peak time for a full sweep
Tickspan NMethodSweeper::_peak_sweep_fraction_time;            // Peak time sweeping one fraction

Monitor* NMethodSweeper::_stat_lock = new Monitor(Mutex::special, "Sweeper::Statistics", true, Monitor::_safepoint_check_sometimes);

class MarkActivationClosure: public CodeBlobClosure {
public:
  virtual void do_code_blob(CodeBlob* cb) {
    assert(cb->is_nmethod(), "CodeBlob should be nmethod");
    nmethod* nm = (nmethod*)cb;
    nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
    // If we see an activation belonging to a non_entrant nmethod, we mark it.
    if (nm->is_not_entrant()) {
      nm->mark_as_seen_on_stack();
    }
  }
};
static MarkActivationClosure mark_activation_closure;

class SetHotnessClosure: public CodeBlobClosure {
public:
  virtual void do_code_blob(CodeBlob* cb) {
    assert(cb->is_nmethod(), "CodeBlob should be nmethod");
    nmethod* nm = (nmethod*)cb;
    nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
  }
};
static SetHotnessClosure set_hotness_closure;


int NMethodSweeper::hotness_counter_reset_val() {
  if (_hotness_counter_reset_val == 0) {
    _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
  }
  return _hotness_counter_reset_val;
}
bool NMethodSweeper::wait_for_stack_scanning() {
  return _current.end();
}

/**
 * Scans the stacks of all Java threads and marks activations of not-entrant methods.
 * No need to synchronize access, since 'mark_active_nmethods' is always executed at a
 * safepoint.
 */
void NMethodSweeper::mark_active_nmethods() {
  CodeBlobClosure* cl = prepare_mark_active_nmethods();
  if (cl != NULL) {
    Threads::nmethods_do(cl);
  }
}

CodeBlobClosure* NMethodSweeper::prepare_mark_active_nmethods() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
  // If we do not want to reclaim not-entrant or zombie methods there is no need
  // to scan stacks
  if (!MethodFlushing) {
    return NULL;
  }

  // Increase time so that we can estimate when to invoke the sweeper again.
  _time_counter++;

  // Check for restart
  if (_current.method() != NULL) {
    if (_current.method()->is_nmethod()) {
      assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
    } else if (_current.method()->is_aot()) {
      assert(CodeCache::find_blob_unsafe(_current.method()->code_begin()) == _current.method(), "Sweeper AOT method cached state invalid");
    } else {
      ShouldNotReachHere();
    }
  }

  if (wait_for_stack_scanning()) {
    _seen = 0;
    _current = CompiledMethodIterator();
    // Initialize to first nmethod
    _current.next();
    _traversals += 1;
    _total_time_this_sweep = Tickspan();

    if (PrintMethodFlushing) {
      tty->print_cr("### Sweep: stack traversal %ld", _traversals);
    }
    return &mark_activation_closure;

  } else {
    // Only set hotness counter
    return &set_hotness_closure;
  }

}

/**
 * This function triggers a VM operation that does stack scanning of active
 * methods. Stack scanning is mandatory for the sweeper to make progress.
 */
void NMethodSweeper::do_stack_scanning() {
  assert(!CodeCache_lock->owned_by_self(), "just checking");
  if (wait_for_stack_scanning()) {
    VM_MarkActiveNMethods op;
    VMThread::execute(&op);
    _should_sweep = true;
  }
}

void NMethodSweeper::sweeper_loop() {
  bool timeout;
  while (true) {
    {
      ThreadBlockInVM tbivm(JavaThread::current());
      MutexLockerEx waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      const long wait_time = 60*60*24 * 1000;
      timeout = CodeCache_lock->wait(Mutex::_no_safepoint_check_flag, wait_time);
    }
    if (!timeout) {
      possibly_sweep();
    }
  }
}

/**
 * Wakes up the sweeper thread to possibly sweep.
 */
void NMethodSweeper::notify(int code_blob_type) {
  // Makes sure that we do not invoke the sweeper too often during startup.
  double start_threshold = 100.0 / (double)StartAggressiveSweepingAt;
  double aggressive_sweep_threshold = MIN2(start_threshold, 1.1);
  if (CodeCache::reverse_free_ratio(code_blob_type) >= aggressive_sweep_threshold) {
    assert_locked_or_safepoint(CodeCache_lock);
    CodeCache_lock->notify();
  }
}

/**
 * Wakes up the sweeper thread and forces a sweep. Blocks until it finishes.
 */
void NMethodSweeper::force_sweep() {
  ThreadBlockInVM tbivm(JavaThread::current());
  MutexLockerEx waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  // Request forced sweep
  _force_sweep = true;
  while (_force_sweep) {
    // Notify the sweeper that we want to force a sweep and wait for completion.
    // In case a sweep is currently taking place, we time out and try again because
    // we want to enforce a full sweep.
    CodeCache_lock->notify();
    CodeCache_lock->wait(Mutex::_no_safepoint_check_flag, 1000);
  }
}

/**
 * Handle a safepoint request
 */
void NMethodSweeper::handle_safepoint_request() {
  if (SafepointSynchronize::is_synchronizing()) {
    if (PrintMethodFlushing && Verbose) {
      tty->print_cr("### Sweep at %d out of %d, yielding to safepoint", _seen, CodeCache::nmethod_count());
    }
    MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    JavaThread* thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);
    thread->java_suspend_self();
  }
}

/**
 * This function invokes the sweeper if at least one of the three conditions is met:
 *    (1) The code cache is getting full
 *    (2) There are sufficient state changes in/since the last sweep.
 *    (3) We have not been sweeping for 'some time'
 */
void NMethodSweeper::possibly_sweep() {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
  // If there was no state change while nmethod sweeping, 'should_sweep' will be false.
  // This is one of the two places where should_sweep can be set to true. The general
  // idea is as follows: If there is enough free space in the code cache, there is no
  // need to invoke the sweeper. The following formula (which determines whether to invoke
  // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes
  // we need less frequent sweeps than for smaller ReservedCodeCacheSizes. Furthermore,
  // the formula considers how much space in the code cache is currently used. Here are
  // some examples that will (hopefully) help in understanding.
  //
  // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since
  //                               the result of the division is 0. This
  //                               keeps the used code cache size small
  //                               (important for embedded Java)
  // Large ReservedCodeCacheSize:  (e.g., 256M + code cache is 10% full). The formula
  //                               computes: (256 / 16) - 1 = 15
  //                               As a result, we invoke the sweeper after
  //                               15 invocations of 'mark_active_nmethods'.
  // Large ReservedCodeCacheSize:  (e.g., 256M + code cache is 90% full). The formula
  //                               computes: (256 / 16) - 10 = 6.
  if (!_should_sweep) {
    const int time_since_last_sweep = _time_counter - _last_sweep;
    // ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time,
    // since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens using
    // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
    // value) that disables the intended periodic sweeps.
    const int max_wait_time = ReservedCodeCacheSize / (16 * M);
    double wait_until_next_sweep = max_wait_time - time_since_last_sweep -
        MAX2(CodeCache::reverse_free_ratio(CodeBlobType::MethodProfiled),
             CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled));
    assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");

    if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
      _should_sweep = true;
    }
  }

  // Remember if this was a forced sweep
  bool forced = _force_sweep;

  // Force stack scanning if there is only 10% free space in the code cache.
  // We force stack scanning only if the non-profiled code heap gets full, since critical
  // allocations go to the non-profiled heap and we must make sure that there is
  // enough space.
  double free_percent = 1 / CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled) * 100;
  if (free_percent <= StartAggressiveSweepingAt) {
    do_stack_scanning();
  }

  if (_should_sweep || forced) {
    init_sweeper_log();
    sweep_code_cache();
  }

  // We are done with sweeping the code cache once.
  _total_nof_code_cache_sweeps++;
  _last_sweep = _time_counter;
  // Reset flag; temporarily disables sweeper
  _should_sweep = false;
  // If there was enough state change, 'possibly_enable_sweeper()'
  // sets '_should_sweep' to true
  possibly_enable_sweeper();
  // Reset _bytes_changed only if there was enough state change. _bytes_changed
  // can further increase by calls to 'report_state_change'.
  if (_should_sweep) {
    _bytes_changed = 0;
  }

  if (forced) {
    // Notify requester that forced sweep finished
    assert(_force_sweep, "Should be a forced sweep");
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _force_sweep = false;
    CodeCache_lock->notify();
  }
}

void NMethodSweeper::sweep_code_cache() {
  ResourceMark rm;
  Ticks sweep_start_counter = Ticks::now();

  log_debug(codecache, sweep, start)("CodeCache flushing");

  int flushed_count    = 0;
  int zombified_count  = 0;
  int flushed_c2_count = 0;

  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Sweep at %d out of %d", _seen, CodeCache::nmethod_count());
  }

  int swept_count = 0;
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  int freed_memory = 0;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    while (!_current.end()) {
      swept_count++;
      // Since we will give up the CodeCache_lock, always skip ahead
      // to the next nmethod. Other blobs can be deleted by other
      // threads but nmethods are only reclaimed by the sweeper.
      CompiledMethod* nm = _current.method();
      _current.next();

      // Now ready to process nmethod and give up CodeCache_lock
      {
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        // Save information before potentially flushing the nmethod
        // Only flushing nmethods so size only matters for them.
        int size = nm->is_nmethod() ? ((nmethod*)nm)->total_size() : 0;
        bool is_c2_method = nm->is_compiled_by_c2();
        bool is_osr = nm->is_osr_method();
        int compile_id = nm->compile_id();
        intptr_t address = p2i(nm);
        const char* state_before = nm->state();
        const char* state_after = "";

        MethodStateChange type = process_compiled_method(nm);
        switch (type) {
          case Flushed:
            state_after = "flushed";
            freed_memory += size;
            ++flushed_count;
            if (is_c2_method) {
              ++flushed_c2_count;
            }
            break;
          case MadeZombie:
            state_after = "made zombie";
            ++zombified_count;
            break;
          case None:
            break;
          default:
            ShouldNotReachHere();
        }
        if (PrintMethodFlushing && Verbose && type != None) {
          tty->print_cr("### %s nmethod %3d/" PTR_FORMAT " (%s) %s", is_osr ?
"osr" : "", compile_id, address, state_before, state_after); 474 } 475 } 476 477 _seen++; 478 handle_safepoint_request(); 479 } 480 } 481 482 assert(_current.end(), "must have scanned the whole cache"); 483 484 const Ticks sweep_end_counter = Ticks::now(); 485 const Tickspan sweep_time = sweep_end_counter - sweep_start_counter; 486 { 487 MutexLockerEx mu(_stat_lock, Mutex::_no_safepoint_check_flag); 488 _total_time_sweeping += sweep_time; 489 _total_time_this_sweep += sweep_time; 490 _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time); 491 _total_flushed_size += freed_memory; 492 _total_nof_methods_reclaimed += flushed_count; 493 _total_nof_c2_methods_reclaimed += flushed_c2_count; 494 _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep); 495 } 496 EventSweepCodeCache event(UNTIMED); 497 if (event.should_commit()) { 498 event.set_starttime(sweep_start_counter); 499 event.set_endtime(sweep_end_counter); 500 event.set_sweepId(_traversals); 501 event.set_sweptCount(swept_count); 502 event.set_flushedCount(flushed_count); 503 event.set_zombifiedCount(zombified_count); 504 event.commit(); 505 } 506 507 #ifdef ASSERT 508 if(PrintMethodFlushing) { 509 tty->print_cr("### sweeper: sweep time(" JLONG_FORMAT "): ", sweep_time.value()); 510 } 511 #endif 512 513 Log(codecache, sweep) log; 514 if (log.is_debug()) { 515 LogStream ls(log.debug()); 516 CodeCache::print_summary(&ls, false); 517 } 518 log_sweep("finished"); 519 520 // Sweeper is the only case where memory is released, check here if it 521 // is time to restart the compiler. Only checking if there is a certain 522 // amount of free memory in the code cache might lead to re-enabling 523 // compilation although no memory has been released. For example, there are 524 // cases when compilation was disabled although there is 4MB (or more) free 525 // memory in the code cache. The reason is code cache fragmentation. Therefore, 526 // it only makes sense to re-enable compilation if we have actually freed memory. 527 // Note that typically several kB are released for sweeping 16MB of the code 528 // cache. As a result, 'freed_memory' > 0 to restart the compiler. 529 if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) { 530 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); 531 log.debug("restart compiler"); 532 log_sweep("restart_compiler"); 533 } 534 } 535 536 /** 537 * This function updates the sweeper statistics that keep track of nmethods 538 * state changes. If there is 'enough' state change, the sweeper is invoked 539 * as soon as possible. There can be data races on _bytes_changed. The data 540 * races are benign, since it does not matter if we loose a couple of bytes. 541 * In the worst case we call the sweeper a little later. Also, we are guaranteed 542 * to invoke the sweeper if the code cache gets full. 543 */ 544 void NMethodSweeper::report_state_change(nmethod* nm) { 545 _bytes_changed += nm->total_size(); 546 possibly_enable_sweeper(); 547 } 548 549 /** 550 * Function determines if there was 'enough' state change in the code cache to invoke 551 * the sweeper again. Currently, we determine 'enough' as more than 1% state change in 552 * the code cache since the last sweep. 
 */
void NMethodSweeper::possibly_enable_sweeper() {
  double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100;
  if (percent_changed > 1.0) {
    _should_sweep = true;
  }
}

class CompiledMethodMarker: public StackObj {
 private:
  CodeCacheSweeperThread* _thread;
 public:
  CompiledMethodMarker(CompiledMethod* cm) {
    JavaThread* current = JavaThread::current();
    assert (current->is_Code_cache_sweeper_thread(), "Must be");
    _thread = (CodeCacheSweeperThread*)current;
    if (!cm->is_zombie() && !cm->is_unloaded()) {
      // Only expose live nmethods for scanning
      _thread->set_scanned_compiled_method(cm);
    }
  }
  ~CompiledMethodMarker() {
    _thread->set_scanned_compiled_method(NULL);
  }
};

void NMethodSweeper::release_compiled_method(CompiledMethod* nm) {
  // Make sure the released nmethod is no longer referenced by the sweeper thread
  CodeCacheSweeperThread* thread = (CodeCacheSweeperThread*)JavaThread::current();
  thread->set_scanned_compiled_method(NULL);

  // Clean up any CompiledICHolders
  {
    ResourceMark rm;
    MutexLocker ml_patch(CompiledIC_lock);
    RelocIterator iter(nm);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC::cleanup_call_site(iter.virtual_call_reloc(), nm);
      }
    }
  }

  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nm->flush();
}

NMethodSweeper::MethodStateChange NMethodSweeper::process_compiled_method(CompiledMethod* cm) {
  assert(cm != NULL, "sanity");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  MethodStateChange result = None;
  // Make sure this nmethod doesn't get unloaded during the scan,
  // since safepoints may happen while the locks acquired below are held.
  CompiledMethodMarker nmm(cm);
  SWEEP(cm);

  // Skip methods that are currently referenced by the VM
  if (cm->is_locked_by_vm()) {
    // But still remember to clean-up inline caches for alive nmethods
    if (cm->is_alive()) {
      // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
      MutexLocker cl(CompiledIC_lock);
      cm->cleanup_inline_caches();
      SWEEP(cm);
    }
    return result;
  }

  if (cm->is_zombie()) {
    // All inline caches that referred to this nmethod were cleaned in the
    // previous sweeper cycle. Now flush the nmethod from the code cache.
    assert(!cm->is_locked_by_vm(), "must not flush locked Compiled Methods");
    release_compiled_method(cm);
    assert(result == None, "sanity");
    result = Flushed;
  } else if (cm->is_not_entrant()) {
    // If there are no current activations of this method on the
    // stack we can safely convert it to a zombie method
    OrderAccess::loadload(); // _stack_traversal_mark and _state
    if (cm->can_convert_to_zombie()) {
      // Clear ICStubs to prevent back patching stubs of zombie or flushed
      // nmethods during the next safepoint (see ICStub::finalize).
      {
        MutexLocker cl(CompiledIC_lock);
        cm->clear_ic_stubs();
      }
      // Code cache state change is tracked in make_zombie()
      cm->make_zombie();
      SWEEP(cm);
      // The nmethod may have been locked by JVMTI after being made zombie (see
      // JvmtiDeferredEvent::compiled_method_unload_event()). If so, we cannot
      // flush the osr nmethod directly but have to wait for a later sweeper cycle.
      if (cm->is_osr_method() && !cm->is_locked_by_vm()) {
        // No inline caches will ever point to osr methods, so we can just remove it.
        // Make sure that we unregistered the nmethod with the heap and flushed all
        // dependencies before removing the nmethod (done in make_zombie()).
        assert(cm->is_zombie(), "nmethod must be unregistered");
        release_compiled_method(cm);
        assert(result == None, "sanity");
        result = Flushed;
      } else {
        assert(result == None, "sanity");
        result = MadeZombie;
        assert(cm->is_zombie(), "nmethod must be zombie");
      }
    } else {
      // Still alive, clean up its inline caches
      MutexLocker cl(CompiledIC_lock);
      cm->cleanup_inline_caches();
      SWEEP(cm);
    }
  } else if (cm->is_unloaded()) {
    // Code is unloaded, so there are no activations on the stack.
    // Convert the nmethod to zombie or flush it directly in the OSR case.
    {
      // Clean ICs of unloaded nmethods as well because they may reference other
      // unloaded nmethods that may be flushed earlier in the sweeper cycle.
      MutexLocker cl(CompiledIC_lock);
      cm->cleanup_inline_caches();
    }
    if (cm->is_osr_method()) {
      SWEEP(cm);
      // No inline caches will ever point to osr methods, so we can just remove it
      release_compiled_method(cm);
      assert(result == None, "sanity");
      result = Flushed;
    } else {
      // Code cache state change is tracked in make_zombie()
      cm->make_zombie();
      SWEEP(cm);
      assert(result == None, "sanity");
      result = MadeZombie;
    }
  } else {
    if (cm->is_nmethod()) {
      possibly_flush((nmethod*)cm);
    }
    // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
    MutexLocker cl(CompiledIC_lock);
    cm->cleanup_inline_caches();
    SWEEP(cm);
  }
  return result;
}


void NMethodSweeper::possibly_flush(nmethod* nm) {
  if (UseCodeCacheFlushing) {
    if (!nm->is_locked_by_vm() && !nm->is_native_method() && !nm->is_not_installed()) {
      bool make_not_entrant = false;

      // Do not make native methods not-entrant
      nm->dec_hotness_counter();
      // Get the initial value of the hotness counter. This value depends on the
      // ReservedCodeCacheSize
      int reset_val = hotness_counter_reset_val();
      int time_since_reset = reset_val - nm->hotness_counter();
      int code_blob_type = CodeCache::get_code_blob_type(nm);
      double threshold = -reset_val + (CodeCache::reverse_free_ratio(code_blob_type) * NmethodSweepActivity);
      // The less free space in the code cache we have - the bigger reverse_free_ratio() is.
      // I.e., 'threshold' increases with lower available space in the code cache and a higher
      // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
      // value until it is reset by stack walking - is smaller than the computed threshold, the
      // corresponding nmethod is considered for removal.
      if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > MinPassesBeforeFlush)) {
        // A method is marked as not-entrant if the method is
        // 1) 'old enough': nm->hotness_counter() < threshold
        // 2) The method was in_use for a minimum amount of time: (time_since_reset > MinPassesBeforeFlush)
        // The second condition is necessary if we are dealing with very small code cache
        // sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
        // The second condition ensures that methods are not immediately made not-entrant
        // after compilation.
        make_not_entrant = true;
      }

      // The stack-scanning low-cost detection may not see that the method was used (which can happen for
      // flat profiles). Check the age counter for possible data.
      if (UseCodeAging && make_not_entrant && (nm->is_compiled_by_c2() || nm->is_compiled_by_c1())) {
        MethodCounters* mc = nm->method()->get_method_counters(Thread::current());
        if (mc != NULL) {
          // Snapshot the value as it's changed concurrently
          int age = mc->nmethod_age();
          if (MethodCounters::is_nmethod_hot(age)) {
            // The method has gone through flushing, and it became hot enough that it deopted
            // before we could take a look at it. Give it more time to appear in the stack traces,
            // proportional to the number of deopts.
            MethodData* md = nm->method()->method_data();
            if (md != NULL && time_since_reset > (int)(MinPassesBeforeFlush * (md->tenure_traps() + 1))) {
              // It's been long enough, and we still haven't seen it on the stack.
              // Try to flush it, but enable counters the next time.
              mc->reset_nmethod_age();
            } else {
              make_not_entrant = false;
            }
          } else if (MethodCounters::is_nmethod_warm(age)) {
            // Method has counters enabled, and the method was used within the
            // previous MinPassesBeforeFlush sweeps. Reset the counter. Stay in the existing
            // compiled state.
            mc->reset_nmethod_age();
            // delay the next check
            nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
            make_not_entrant = false;
          } else if (MethodCounters::is_nmethod_age_unset(age)) {
            // No counters were used before. Set the counters to the detection
            // limit value. If the method is going to be used again it will be compiled
            // with counters that we're going to use for analysis the next time.
            mc->reset_nmethod_age();
          } else {
            // Method was totally idle for 10 sweeps.
            // The counter already has the initial value; flush it and maybe recompile
            // later with counters.
          }
        }
      }

      if (make_not_entrant) {
        nm->make_not_entrant();

        // Code cache state change is tracked in make_not_entrant()
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f",
                        nm->compile_id(), p2i(nm), nm->hotness_counter(), reset_val, threshold);
        }
      }
    }
  }
}

// Print out some state information about the current sweep and the
// state of the code cache if it's requested.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
  if (PrintMethodFlushing) {
    ResourceMark rm;
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    tty->print("### sweeper: %s ", msg);
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      tty->vprint(format, ap);
      va_end(ap);
    }
    tty->print_cr("%s", s.as_string());
  }

  if (LogCompilation && (xtty != NULL)) {
    ResourceMark rm;
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      xtty->vprint(format, ap);
      va_end(ap);
    }
    xtty->print("%s", s.as_string());
    xtty->stamp();
    xtty->end_elem();
  }
}

void NMethodSweeper::print() {
  ttyLocker ttyl;
  tty->print_cr("Code cache sweeper statistics:");
  tty->print_cr(" Total sweep time: %1.0lfms", (double)_total_time_sweeping.value()/1000000);
  tty->print_cr(" Total number of full sweeps: %ld", _total_nof_code_cache_sweeps);
  tty->print_cr(" Total number of flushed methods: %ld(%ld C2 methods)", _total_nof_methods_reclaimed,
                _total_nof_c2_methods_reclaimed);
  tty->print_cr(" Total size of flushed methods: " SIZE_FORMAT "kB", _total_flushed_size/K);
}