/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/workgroup.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"

#ifdef ASSERT

#define SWEEP(nm) record_sweep(nm, __LINE__)
// Sweeper logging code
class SweeperRecord {
 public:
  int traversal;
  int compile_id;
  long traversal_mark;
  int state;
  const char* kind;
  address vep;
  address uep;
  int line;

  void print() {
    tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
                  PTR_FORMAT " state = %d traversal_mark %ld line = %d",
                  traversal,
                  compile_id,
"" : kind, 74 p2i(uep), 75 p2i(vep), 76 state, 77 traversal_mark, 78 line); 79 } 80 }; 81 82 static int _sweep_index = 0; 83 static SweeperRecord* _records = NULL; 84 85 void NMethodSweeper::report_events(int id, address entry) { 86 if (_records != NULL) { 87 for (int i = _sweep_index; i < SweeperLogEntries; i++) { 88 if (_records[i].uep == entry || 89 _records[i].vep == entry || 90 _records[i].compile_id == id) { 91 _records[i].print(); 92 } 93 } 94 for (int i = 0; i < _sweep_index; i++) { 95 if (_records[i].uep == entry || 96 _records[i].vep == entry || 97 _records[i].compile_id == id) { 98 _records[i].print(); 99 } 100 } 101 } 102 } 103 104 void NMethodSweeper::report_events() { 105 if (_records != NULL) { 106 for (int i = _sweep_index; i < SweeperLogEntries; i++) { 107 // skip empty records 108 if (_records[i].vep == NULL) continue; 109 _records[i].print(); 110 } 111 for (int i = 0; i < _sweep_index; i++) { 112 // skip empty records 113 if (_records[i].vep == NULL) continue; 114 _records[i].print(); 115 } 116 } 117 } 118 119 void NMethodSweeper::record_sweep(CompiledMethod* nm, int line) { 120 if (_records != NULL) { 121 _records[_sweep_index].traversal = _traversals; 122 _records[_sweep_index].traversal_mark = nm->is_nmethod() ? ((nmethod*)nm)->stack_traversal_mark() : 0; 123 _records[_sweep_index].compile_id = nm->compile_id(); 124 _records[_sweep_index].kind = nm->compile_kind(); 125 _records[_sweep_index].state = nm->get_state(); 126 _records[_sweep_index].vep = nm->verified_entry_point(); 127 _records[_sweep_index].uep = nm->entry_point(); 128 _records[_sweep_index].line = line; 129 _sweep_index = (_sweep_index + 1) % SweeperLogEntries; 130 } 131 } 132 133 void NMethodSweeper::init_sweeper_log() { 134 if (LogSweeper && _records == NULL) { 135 // Create the ring buffer for the logging code 136 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC); 137 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries); 138 } 139 } 140 #else 141 #define SWEEP(nm) 142 #endif 143 144 CompiledMethodIterator NMethodSweeper::_current(CompiledMethodIterator::all_blobs); // Current compiled method 145 long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID. 146 long NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache 147 long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper 148 long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened 149 int NMethodSweeper::_seen = 0; // Nof. 

volatile bool NMethodSweeper::_should_sweep           = false;// Indicates if we should invoke the sweeper
volatile bool NMethodSweeper::_force_sweep            = false;// Indicates if we should force a sweep
volatile int  NMethodSweeper::_bytes_changed          = 0;    // Counts the total nmethod size if the nmethod changed from:
                                                              // 1) alive       -> not_entrant
                                                              // 2) not_entrant -> zombie
int      NMethodSweeper::_hotness_counter_reset_val   = 0;

long     NMethodSweeper::_total_nof_methods_reclaimed    = 0; // Accumulated nof methods flushed
long     NMethodSweeper::_total_nof_c2_methods_reclaimed = 0; // Accumulated nof C2 methods flushed
size_t   NMethodSweeper::_total_flushed_size             = 0; // Total number of bytes flushed from the code cache
Tickspan NMethodSweeper::_total_time_sweeping;                // Accumulated time sweeping
Tickspan NMethodSweeper::_total_time_this_sweep;              // Total time this sweep
Tickspan NMethodSweeper::_peak_sweep_time;                    // Peak time for a full sweep
Tickspan NMethodSweeper::_peak_sweep_fraction_time;           // Peak time sweeping one fraction

class MarkActivationClosure: public CodeBlobClosure {
 public:
  virtual void do_code_blob(CodeBlob* cb) {
    assert(cb->is_nmethod(), "CodeBlob should be nmethod");
    nmethod* nm = (nmethod*)cb;
    nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
    // If we see an activation belonging to a non_entrant nmethod, we mark it.
    if (nm->is_not_entrant()) {
      nm->mark_as_seen_on_stack();
    }
  }
};
static MarkActivationClosure mark_activation_closure;

class SetHotnessClosure: public CodeBlobClosure {
 public:
  virtual void do_code_blob(CodeBlob* cb) {
    assert(cb->is_nmethod(), "CodeBlob should be nmethod");
    nmethod* nm = (nmethod*)cb;
    nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
  }
};
static SetHotnessClosure set_hotness_closure;
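
// A worked example for the reset value computed below (illustrative sizes,
// not asserted defaults): ReservedCodeCacheSize = 240M gives (240M / M) * 2 = 480,
// while anything below 1M is pinned to 1. Each stack scan restores an nmethod's
// hotness counter to this value and possibly_flush() decays it in between, so
// a larger code cache tolerates proportionally longer idle periods before an
// nmethod becomes a flushing candidate.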
int NMethodSweeper::hotness_counter_reset_val() {
  if (_hotness_counter_reset_val == 0) {
    _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
  }
  return _hotness_counter_reset_val;
}

bool NMethodSweeper::wait_for_stack_scanning() {
  return _current.end();
}

class NMethodMarkingThreadClosure : public ThreadClosure {
 private:
  CodeBlobClosure* _cl;
 public:
  NMethodMarkingThreadClosure(CodeBlobClosure* cl) : _cl(cl) {}
  void do_thread(Thread* thread) {
    if (thread->is_Java_thread() && !thread->is_Code_cache_sweeper_thread()) {
      JavaThread* jt = (JavaThread*) thread;
      jt->nmethods_do(_cl);
    }
  }
};

class NMethodMarkingTask : public AbstractGangTask {
 private:
  NMethodMarkingThreadClosure* _cl;
 public:
  NMethodMarkingTask(NMethodMarkingThreadClosure* cl) :
    AbstractGangTask("Parallel NMethod Marking"),
    _cl(cl) {
    Threads::change_thread_claim_token();
  }

  ~NMethodMarkingTask() {
    Threads::assert_all_threads_claimed();
  }

  void work(uint worker_id) {
    Threads::possibly_parallel_threads_do(true, _cl);
  }
};

/**
 * Scans the stacks of all Java threads and marks activations of not-entrant methods.
 * No need to synchronize access, since 'mark_active_nmethods' is always executed at a
 * safepoint.
 */
void NMethodSweeper::mark_active_nmethods() {
  CodeBlobClosure* cl = prepare_mark_active_nmethods();
  if (cl != NULL) {
    WorkGang* workers = Universe::heap()->get_safepoint_workers();
    if (workers != NULL) {
      NMethodMarkingThreadClosure tcl(cl);
      NMethodMarkingTask task(&tcl);
      workers->run_task(&task);
    } else {
      Threads::nmethods_do(cl);
    }
  }
}

CodeBlobClosure* NMethodSweeper::prepare_mark_active_nmethods() {
#ifdef ASSERT
  if (ThreadLocalHandshakes) {
    assert(Thread::current()->is_Code_cache_sweeper_thread(), "must be executed under CodeCache_lock and in sweeper thread");
    assert_lock_strong(CodeCache_lock);
  } else {
    assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
  }
#endif

  // If we do not want to reclaim not-entrant or zombie methods there is no need
  // to scan stacks
  if (!MethodFlushing) {
    return NULL;
  }

  // Increase time so that we can estimate when to invoke the sweeper again.
  _time_counter++;

  // Check for restart
  assert(_current.method() == NULL, "should only happen between sweeper cycles");
  assert(wait_for_stack_scanning(), "should only happen between sweeper cycles");

  _seen = 0;
  _current = CompiledMethodIterator(CompiledMethodIterator::all_blobs);
  // Initialize to first nmethod
  _current.next();
  _traversals += 1;
  _total_time_this_sweep = Tickspan();

  if (PrintMethodFlushing) {
    tty->print_cr("### Sweep: stack traversal %ld", _traversals);
  }
  return &mark_activation_closure;
}

CodeBlobClosure* NMethodSweeper::prepare_reset_hotness_counters() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");

  // If we do not want to reclaim not-entrant or zombie methods there is no need
  // to scan stacks
  if (!MethodFlushing) {
    return NULL;
  }

  // Increase time so that we can estimate when to invoke the sweeper again.
  _time_counter++;

  // Check for restart
  if (_current.method() != NULL) {
    if (_current.method()->is_nmethod()) {
      assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
    } else if (_current.method()->is_aot()) {
      assert(CodeCache::find_blob_unsafe(_current.method()->code_begin()) == _current.method(), "Sweeper AOT method cached state invalid");
    } else {
      ShouldNotReachHere();
    }
  }

  return &set_hotness_closure;
}

class NMethodMarkingHandshake : public HandshakeOperation {
  NMethodMarkingThreadClosure* _cl;
 public:
  NMethodMarkingHandshake(NMethodMarkingThreadClosure* cl) : _cl(cl) {}
  const char* name() { return "NMethodMarking"; }
  void do_thread(JavaThread* jt) {
    _cl->do_thread(jt);
  }
};

/**
 * This function triggers a VM operation that does stack scanning of active
 * methods. Stack scanning is mandatory for the sweeper to make progress.
 */
void NMethodSweeper::do_stack_scanning() {
  assert(!CodeCache_lock->owned_by_self(), "just checking");
  if (wait_for_stack_scanning()) {
    if (ThreadLocalHandshakes) {
      CodeBlobClosure* code_cl;
      {
        MutexLocker ccl(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        code_cl = prepare_mark_active_nmethods();
      }
      if (code_cl != NULL) {
        NMethodMarkingThreadClosure tcl(code_cl);
        NMethodMarkingHandshake nm_hs(&tcl);
        Handshake::execute(&nm_hs);
      }
    } else {
      VM_MarkActiveNMethods op;
      VMThread::execute(&op);
    }
  }
}

void NMethodSweeper::sweeper_loop() {
  bool timeout;
  while (true) {
    {
      ThreadBlockInVM tbivm(JavaThread::current());
      MonitorLocker waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      const long wait_time = 60*60*24 * 1000; // wait at most 24 hours between sweep attempts
      timeout = waiter.wait(wait_time);
    }
    if (!timeout) {
      possibly_sweep();
    }
  }
}

/**
 * Wakes up the sweeper thread to possibly sweep.
 */
void NMethodSweeper::notify(int code_blob_type) {
  // Makes sure that we do not invoke the sweeper too often during startup.
  double start_threshold = 100.0 / (double)StartAggressiveSweepingAt;
  double aggressive_sweep_threshold = MIN2(start_threshold, 1.1);
  if (CodeCache::reverse_free_ratio(code_blob_type) >= aggressive_sweep_threshold) {
    assert_locked_or_safepoint(CodeCache_lock);
    CodeCache_lock->notify();
  }
}

/**
 * Wakes up the sweeper thread and forces a sweep. Blocks until it has finished.
 */
void NMethodSweeper::force_sweep() {
  ThreadBlockInVM tbivm(JavaThread::current());
  MonitorLocker waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  // Request forced sweep
  _force_sweep = true;
  while (_force_sweep) {
    // Notify sweeper that we want to force a sweep and wait for completion.
    // In case a sweep currently takes place we timeout and try again because
    // we want to enforce a full sweep.
    CodeCache_lock->notify();
    waiter.wait(1000);
  }
}

/**
 * Handle a safepoint request
 */
void NMethodSweeper::handle_safepoint_request() {
  JavaThread* thread = JavaThread::current();
  if (SafepointMechanism::should_block(thread)) {
    if (PrintMethodFlushing && Verbose) {
      tty->print_cr("### Sweep at %d out of %d, yielding to safepoint", _seen, CodeCache::nmethod_count());
    }
    MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    ThreadBlockInVM tbivm(thread);
    thread->java_suspend_self();
  }
}

/**
 * This function invokes the sweeper if at least one of the three conditions is met:
 *    (1) The code cache is getting full
 *    (2) There have been sufficient state changes in/since the last sweep.
 *    (3) We have not been sweeping for 'some time'
 */
void NMethodSweeper::possibly_sweep() {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
  // If there was no state change while nmethod sweeping, 'should_sweep' will be false.
  // This is one of the two places where should_sweep can be set to true. The general
  // idea is as follows: If there is enough free space in the code cache, there is no
  // need to invoke the sweeper. The following formula (which determines whether to invoke
  // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes
  // we need less frequent sweeps than for smaller ReservedCodeCacheSizes. Furthermore,
  // the formula considers how much space in the code cache is currently used. Here are
  // some examples that will (hopefully) help in understanding.
  //
  // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since
  //                               the result of the division is 0. This
  //                               keeps the used code cache size small
  //                               (important for embedded Java)
  // Large ReservedCodeCacheSize:  (e.g., 256M + code cache is 10% full). The formula
  //                               computes: (256 / 16) - 1 = 15
  //                               As a result, we invoke the sweeper after
  //                               15 invocations of 'mark_active_nmethods'.
  // Large ReservedCodeCacheSize:  (e.g., 256M + code cache is 90% full). The formula
  //                               computes: (256 / 16) - 10 = 6.
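  //
  // Combining the pieces (illustrative numbers only): with ReservedCodeCacheSize
  // = 256M, 'max_wait_time' below is 256 / 16 = 16. If 8 virtual time ticks have
  // passed since the last sweep and the fuller of the two method heaps has a
  // reverse_free_ratio() of 4 (i.e., it is 75% full), then
  // wait_until_next_sweep = 16 - 8 - 4 = 4: the sweeper stays disabled for
  // roughly four more stack-scan intervals, unless compilation has been disabled.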
  if (!_should_sweep) {
    const int time_since_last_sweep = _time_counter - _last_sweep;
    // ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time,
    // since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens using
    // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
    // value) that disables the intended periodic sweeps.
    const int max_wait_time = ReservedCodeCacheSize / (16 * M);
    double wait_until_next_sweep = max_wait_time - time_since_last_sweep -
        MAX2(CodeCache::reverse_free_ratio(CodeBlobType::MethodProfiled),
             CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled));
    assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");

    if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
      _should_sweep = true;
    }
  }

  // Remember if this was a forced sweep
  bool forced = _force_sweep;

  // Force stack scanning if there is only 10% free space in the code cache.
  // We force stack scanning only if the non-profiled code heap gets full, since critical
  // allocations go to the non-profiled heap and we must make sure that there is
  // enough space.
  double free_percent = 1 / CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled) * 100;
  if (free_percent <= StartAggressiveSweepingAt || forced || _should_sweep) {
    do_stack_scanning();
  }

  if (_should_sweep || forced) {
    init_sweeper_log();
    sweep_code_cache();
  }

  // We are done with sweeping the code cache once.
  _total_nof_code_cache_sweeps++;
  _last_sweep = _time_counter;
  // Reset flag; temporarily disables sweeper
  _should_sweep = false;
  // If there was enough state change, 'possibly_enable_sweeper()'
  // sets '_should_sweep' to true
  possibly_enable_sweeper();
  // Reset _bytes_changed only if there was enough state change. _bytes_changed
  // can further increase by calls to 'report_state_change'.
  if (_should_sweep) {
    _bytes_changed = 0;
  }

  if (forced) {
    // Notify requester that forced sweep finished
    assert(_force_sweep, "Should be a forced sweep");
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _force_sweep = false;
    CodeCache_lock->notify();
  }
}
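
// Outline of one sweeper round as implemented above: notify() (or the 24 hour
// timeout) wakes sweeper_loop(), which calls possibly_sweep(). That may first
// trigger a stack scan via do_stack_scanning() -- a thread-local handshake or
// a VM operation, depending on ThreadLocalHandshakes -- to refresh hotness
// counters and mark not-entrant methods seen on stacks, and then walks the
// code cache in sweep_code_cache() below.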

static void post_sweep_event(EventSweepCodeCache* event,
                             const Ticks& start,
                             const Ticks& end,
                             s4 traversals,
                             int swept,
                             int flushed,
                             int zombified) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_starttime(start);
  event->set_endtime(end);
  event->set_sweepId(traversals);
  event->set_sweptCount(swept);
  event->set_flushedCount(flushed);
  event->set_zombifiedCount(zombified);
  event->commit();
}

void NMethodSweeper::sweep_code_cache() {
  ResourceMark rm;
  Ticks sweep_start_counter = Ticks::now();

  log_debug(codecache, sweep, start)("CodeCache flushing");

  int flushed_count    = 0;
  int zombified_count  = 0;
  int flushed_c2_count = 0;

  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Sweep at %d out of %d", _seen, CodeCache::nmethod_count());
  }

  int swept_count = 0;
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  int freed_memory = 0;
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    while (!_current.end()) {
      swept_count++;
      // Since we will give up the CodeCache_lock, always skip ahead
      // to the next nmethod. Other blobs can be deleted by other
      // threads but nmethods are only reclaimed by the sweeper.
      CompiledMethod* nm = _current.method();
      _current.next();

      // Now ready to process nmethod and give up CodeCache_lock
      {
        MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        // Save information before potentially flushing the nmethod
        // Only flushing nmethods so size only matters for them.
        int size = nm->is_nmethod() ? ((nmethod*)nm)->total_size() : 0;
        bool is_c2_method = nm->is_compiled_by_c2();
        bool is_osr = nm->is_osr_method();
        int compile_id = nm->compile_id();
        intptr_t address = p2i(nm);
        const char* state_before = nm->state();
        const char* state_after = "";

        MethodStateChange type = process_compiled_method(nm);
        switch (type) {
          case Flushed:
            state_after = "flushed";
            freed_memory += size;
            ++flushed_count;
            if (is_c2_method) {
              ++flushed_c2_count;
            }
            break;
          case MadeZombie:
            state_after = "made zombie";
            ++zombified_count;
            break;
          case None:
            break;
          default:
            ShouldNotReachHere();
        }
"osr" : "", compile_id, address, state_before, state_after); 576 } 577 } 578 579 _seen++; 580 handle_safepoint_request(); 581 } 582 } 583 584 assert(_current.end(), "must have scanned the whole cache"); 585 586 const Ticks sweep_end_counter = Ticks::now(); 587 const Tickspan sweep_time = sweep_end_counter - sweep_start_counter; 588 { 589 MutexLocker mu(NMethodSweeperStats_lock, Mutex::_no_safepoint_check_flag); 590 _total_time_sweeping += sweep_time; 591 _total_time_this_sweep += sweep_time; 592 _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time); 593 _total_flushed_size += freed_memory; 594 _total_nof_methods_reclaimed += flushed_count; 595 _total_nof_c2_methods_reclaimed += flushed_c2_count; 596 _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep); 597 } 598 599 EventSweepCodeCache event(UNTIMED); 600 if (event.should_commit()) { 601 post_sweep_event(&event, sweep_start_counter, sweep_end_counter, (s4)_traversals, swept_count, flushed_count, zombified_count); 602 } 603 604 #ifdef ASSERT 605 if(PrintMethodFlushing) { 606 tty->print_cr("### sweeper: sweep time(" JLONG_FORMAT "): ", sweep_time.value()); 607 } 608 #endif 609 610 Log(codecache, sweep) log; 611 if (log.is_debug()) { 612 LogStream ls(log.debug()); 613 CodeCache::print_summary(&ls, false); 614 } 615 log_sweep("finished"); 616 617 // Sweeper is the only case where memory is released, check here if it 618 // is time to restart the compiler. Only checking if there is a certain 619 // amount of free memory in the code cache might lead to re-enabling 620 // compilation although no memory has been released. For example, there are 621 // cases when compilation was disabled although there is 4MB (or more) free 622 // memory in the code cache. The reason is code cache fragmentation. Therefore, 623 // it only makes sense to re-enable compilation if we have actually freed memory. 624 // Note that typically several kB are released for sweeping 16MB of the code 625 // cache. As a result, 'freed_memory' > 0 to restart the compiler. 626 if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) { 627 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); 628 log.debug("restart compiler"); 629 log_sweep("restart_compiler"); 630 } 631 } 632 633 /** 634 * This function updates the sweeper statistics that keep track of nmethods 635 * state changes. If there is 'enough' state change, the sweeper is invoked 636 * as soon as possible. There can be data races on _bytes_changed. The data 637 * races are benign, since it does not matter if we loose a couple of bytes. 638 * In the worst case we call the sweeper a little later. Also, we are guaranteed 639 * to invoke the sweeper if the code cache gets full. 640 */ 641 void NMethodSweeper::report_state_change(nmethod* nm) { 642 _bytes_changed += nm->total_size(); 643 possibly_enable_sweeper(); 644 } 645 646 /** 647 * Function determines if there was 'enough' state change in the code cache to invoke 648 * the sweeper again. Currently, we determine 'enough' as more than 1% state change in 649 * the code cache since the last sweep. 
 */
void NMethodSweeper::possibly_enable_sweeper() {
  double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100;
  if (percent_changed > 1.0) {
    _should_sweep = true;
  }
}

class CompiledMethodMarker: public StackObj {
 private:
  CodeCacheSweeperThread* _thread;
 public:
  CompiledMethodMarker(CompiledMethod* cm) {
    JavaThread* current = JavaThread::current();
    assert(current->is_Code_cache_sweeper_thread(), "Must be");
    _thread = (CodeCacheSweeperThread*)current;
    if (!cm->is_zombie() && !cm->is_unloading()) {
      // Only expose live nmethods for scanning
      _thread->set_scanned_compiled_method(cm);
    }
  }
  ~CompiledMethodMarker() {
    _thread->set_scanned_compiled_method(NULL);
  }
};

NMethodSweeper::MethodStateChange NMethodSweeper::process_compiled_method(CompiledMethod* cm) {
  assert(cm != NULL, "sanity");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  MethodStateChange result = None;
  // Make sure this nmethod doesn't get unloaded during the scan,
  // since safepoints may happen while the locks acquired below are held.
  CompiledMethodMarker nmm(cm);
  SWEEP(cm);

  // Skip methods that are currently referenced by the VM
  if (cm->is_locked_by_vm()) {
    // But still remember to clean-up inline caches for alive nmethods
    if (cm->is_alive()) {
      // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
      cm->cleanup_inline_caches(false);
      SWEEP(cm);
    }
    return result;
  }

  if (cm->is_zombie()) {
    // All inline caches that referred to this nmethod were cleaned in the
    // previous sweeper cycle. Now flush the nmethod from the code cache.
    assert(!cm->is_locked_by_vm(), "must not flush locked Compiled Methods");
    cm->flush();
    assert(result == None, "sanity");
    result = Flushed;
  } else if (cm->is_not_entrant()) {
    // If there are no current activations of this method on the
    // stack we can safely convert it to a zombie method
    OrderAccess::loadload(); // _stack_traversal_mark and _state
    if (cm->can_convert_to_zombie()) {
      // Code cache state change is tracked in make_zombie()
      cm->make_zombie();
      SWEEP(cm);
      assert(result == None, "sanity");
      result = MadeZombie;
      assert(cm->is_zombie(), "nmethod must be zombie");
    } else {
      // Still alive, clean up its inline caches
      cm->cleanup_inline_caches(false);
      SWEEP(cm);
    }
  } else if (cm->is_unloaded()) {
    // Code is unloaded, so there are no activations on the stack.
    // Convert the nmethod to zombie.
    // Code cache state change is tracked in make_zombie()
    cm->make_zombie();
    SWEEP(cm);
    assert(result == None, "sanity");
    result = MadeZombie;
  } else {
    if (cm->is_nmethod()) {
      possibly_flush((nmethod*)cm);
    }
    // Clean inline caches that point to zombie/non-entrant/unloaded nmethods
    cm->cleanup_inline_caches(false);
    SWEEP(cm);
  }
  return result;
}

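// A worked example for the threshold computed in possibly_flush() below
// (illustrative numbers, not asserted defaults): with ReservedCodeCacheSize =
// 240M the hotness counter resets to 480. If the relevant code heap is 75%
// full, reverse_free_ratio() is about 4, so with NmethodSweepActivity = 10 the
// threshold is -480 + 4 * 10 = -440. An nmethod whose counter has decayed
// below that (i.e., time_since_reset = 480 - (-440) = 920 sweeper passes
// without being seen on a stack) becomes a candidate for being made not-entrant.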
void NMethodSweeper::possibly_flush(nmethod* nm) {
  if (UseCodeCacheFlushing) {
    if (!nm->is_locked_by_vm() && !nm->is_native_method() && !nm->is_not_installed() && !nm->is_unloading()) {
      bool make_not_entrant = false;

      // Do not make native methods not-entrant
      nm->dec_hotness_counter();
      // Get the initial value of the hotness counter. This value depends on the
      // ReservedCodeCacheSize
      int reset_val = hotness_counter_reset_val();
      int time_since_reset = reset_val - nm->hotness_counter();
      int code_blob_type = CodeCache::get_code_blob_type(nm);
      double threshold = -reset_val + (CodeCache::reverse_free_ratio(code_blob_type) * NmethodSweepActivity);
      // The less free space in the code cache we have - the bigger reverse_free_ratio() is.
      // I.e., 'threshold' increases with lower available space in the code cache and a higher
      // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
      // value until it is reset by stack walking - is smaller than the computed threshold, the
      // corresponding nmethod is considered for removal.
      if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > MinPassesBeforeFlush)) {
        // A method is marked as not-entrant if the method is
        // 1) 'old enough': nm->hotness_counter() < threshold
        // 2) The method was in_use for a minimum amount of time: (time_since_reset > MinPassesBeforeFlush)
        //    The second condition is necessary if we are dealing with very small code cache
        //    sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
        //    It ensures that methods are not immediately made not-entrant after compilation.
        make_not_entrant = true;
      }

      // The stack-scanning low-cost detection may not see that the method was used (which can
      // happen for flat profiles). Check the age counter for possible data.
      if (UseCodeAging && make_not_entrant && (nm->is_compiled_by_c2() || nm->is_compiled_by_c1())) {
        MethodCounters* mc = nm->method()->get_method_counters(Thread::current());
        if (mc != NULL) {
          // Snapshot the value as it's changed concurrently
          int age = mc->nmethod_age();
          if (MethodCounters::is_nmethod_hot(age)) {
            // The method has gone through flushing, and it became hot enough that it deopted
            // before we could take a look at it. Give it more time to appear in the stack traces,
            // proportional to the number of deopts.
            MethodData* md = nm->method()->method_data();
            if (md != NULL && time_since_reset > (int)(MinPassesBeforeFlush * (md->tenure_traps() + 1))) {
              // It's been long enough, we still haven't seen it on stack.
              // Try to flush it, but enable counters the next time.
              mc->reset_nmethod_age();
            } else {
              make_not_entrant = false;
            }
          } else if (MethodCounters::is_nmethod_warm(age)) {
            // Method has counters enabled, and the method was used within
            // previous MinPassesBeforeFlush sweeps. Reset the counter. Stay in the existing
            // compiled state.
            mc->reset_nmethod_age();
            // delay the next check
            nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
            make_not_entrant = false;
          } else if (MethodCounters::is_nmethod_age_unset(age)) {
            // No counters were used before. Set the counters to the detection
            // limit value. If the method is going to be used again it will be compiled
            // with counters that we're going to use for analysis the next time.
            mc->reset_nmethod_age();
          } else {
            // Method was totally idle for 10 sweeps.
            // The counter already has the initial value; flush it and maybe
            // recompile later with counters.
          }
        }
      }

      if (make_not_entrant) {
        nm->make_not_entrant();

        // Code cache state change is tracked in make_not_entrant()
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Nmethod %d/" PTR_FORMAT " made not-entrant: hotness counter %d/%d threshold %f",
                        nm->compile_id(), p2i(nm), nm->hotness_counter(), reset_val, threshold);
        }
      }
    }
  }
}

// Print out some state information about the current sweep and the
// state of the code cache if it's requested.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
  if (PrintMethodFlushing) {
    ResourceMark rm;
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    tty->print("### sweeper: %s ", msg);
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      tty->vprint(format, ap);
      va_end(ap);
    }
    tty->print_cr("%s", s.as_string());
  }

  if (LogCompilation && (xtty != NULL)) {
    ResourceMark rm;
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      xtty->vprint(format, ap);
      va_end(ap);
    }
    xtty->print("%s", s.as_string());
    xtty->stamp();
    xtty->end_elem();
  }
}

void NMethodSweeper::print(outputStream* out) {
  ttyLocker ttyl;
  out = (out == NULL) ? tty : out;
  out->print_cr("Code cache sweeper statistics:");
  out->print_cr("  Total sweep time:                %1.0lf ms", (double)_total_time_sweeping.value()/1000000);
  out->print_cr("  Total number of full sweeps:     %ld", _total_nof_code_cache_sweeps);
  out->print_cr("  Total number of flushed methods: %ld (thereof %ld C2 methods)", _total_nof_methods_reclaimed,
                                                                                   _total_nof_c2_methods_reclaimed);
  out->print_cr("  Total size of flushed methods:   " SIZE_FORMAT " kB", _total_flushed_size/K);
}
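
// Example output of NMethodSweeper::print() (numbers are illustrative; the
// format follows the print_cr calls above):
//
//   Code cache sweeper statistics:
//     Total sweep time:                37 ms
//     Total number of full sweeps:     4
//     Total number of flushed methods: 120 (thereof 30 C2 methods)
//     Total size of flushed methods:   512 kB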