1 /* 2 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "code/codeCache.hpp" 27 #include "code/compiledIC.hpp" 28 #include "code/icBuffer.hpp" 29 #include "code/nmethod.hpp" 30 #include "compiler/compileBroker.hpp" 31 #include "gc/shared/collectedHeap.hpp" 32 #include "gc/shared/workgroup.hpp" 33 #include "jfr/jfrEvents.hpp" 34 #include "logging/log.hpp" 35 #include "logging/logStream.hpp" 36 #include "memory/allocation.inline.hpp" 37 #include "memory/resourceArea.hpp" 38 #include "memory/universe.hpp" 39 #include "oops/method.hpp" 40 #include "runtime/atomic.hpp" 41 #include "runtime/compilationPolicy.hpp" 42 #include "runtime/interfaceSupport.inline.hpp" 43 #include "runtime/handshake.hpp" 44 #include "runtime/mutexLocker.hpp" 45 #include "runtime/orderAccess.hpp" 46 #include "runtime/os.hpp" 47 #include "runtime/sweeper.hpp" 48 #include "runtime/thread.inline.hpp" 49 #include "runtime/vm_operations.hpp" 50 #include "runtime/vmThread.hpp" 51 #include "utilities/events.hpp" 52 #include "utilities/xmlstream.hpp" 53 54 #ifdef ASSERT 55 56 #define SWEEP(nm) record_sweep(nm, __LINE__) 57 // Sweeper logging code 58 class SweeperRecord { 59 public: 60 int traversal; 61 int compile_id; 62 long traversal_mark; 63 int state; 64 const char* kind; 65 address vep; 66 address uep; 67 int line; 68 69 void print() { 70 tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = " 71 PTR_FORMAT " state = %d traversal_mark %ld line = %d", 72 traversal, 73 compile_id, 74 kind == NULL ? "" : kind, 75 p2i(uep), 76 p2i(vep), 77 state, 78 traversal_mark, 79 line); 80 } 81 }; 82 83 static int _sweep_index = 0; 84 static SweeperRecord* _records = NULL; 85 86 void NMethodSweeper::report_events(int id, address entry) { 87 if (_records != NULL) { 88 for (int i = _sweep_index; i < SweeperLogEntries; i++) { 89 if (_records[i].uep == entry || 90 _records[i].vep == entry || 91 _records[i].compile_id == id) { 92 _records[i].print(); 93 } 94 } 95 for (int i = 0; i < _sweep_index; i++) { 96 if (_records[i].uep == entry || 97 _records[i].vep == entry || 98 _records[i].compile_id == id) { 99 _records[i].print(); 100 } 101 } 102 } 103 } 104 105 void NMethodSweeper::report_events() { 106 if (_records != NULL) { 107 for (int i = _sweep_index; i < SweeperLogEntries; i++) { 108 // skip empty records 109 if (_records[i].vep == NULL) continue; 110 _records[i].print(); 111 } 112 for (int i = 0; i < _sweep_index; i++) { 113 // skip empty records 114 if (_records[i].vep == NULL) continue; 115 _records[i].print(); 116 } 117 } 118 } 119 120 void NMethodSweeper::record_sweep(CompiledMethod* nm, int line) { 121 if (_records != NULL) { 122 _records[_sweep_index].traversal = _traversals; 123 _records[_sweep_index].traversal_mark = nm->is_nmethod() ? ((nmethod*)nm)->stack_traversal_mark() : 0; 124 _records[_sweep_index].compile_id = nm->compile_id(); 125 _records[_sweep_index].kind = nm->compile_kind(); 126 _records[_sweep_index].state = nm->get_state(); 127 _records[_sweep_index].vep = nm->verified_entry_point(); 128 _records[_sweep_index].uep = nm->entry_point(); 129 _records[_sweep_index].line = line; 130 _sweep_index = (_sweep_index + 1) % SweeperLogEntries; 131 } 132 } 133 134 void NMethodSweeper::init_sweeper_log() { 135 if (LogSweeper && _records == NULL) { 136 // Create the ring buffer for the logging code 137 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC); 138 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries); 139 } 140 } 141 #else 142 #define SWEEP(nm) 143 #endif 144 145 CompiledMethodIterator NMethodSweeper::_current; // Current compiled method 146 long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID. 147 long NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache 148 long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper 149 long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened 150 int NMethodSweeper::_seen = 0; // Nof. nmethod we have currently processed in current pass of CodeCache 151 152 volatile bool NMethodSweeper::_should_sweep = true; // Indicates if we should invoke the sweeper 153 volatile bool NMethodSweeper::_force_sweep = false;// Indicates if we should force a sweep 154 volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from: 155 // 1) alive -> not_entrant 156 // 2) not_entrant -> zombie 157 int NMethodSweeper::_hotness_counter_reset_val = 0; 158 159 long NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed 160 long NMethodSweeper::_total_nof_c2_methods_reclaimed = 0; // Accumulated nof methods flushed 161 size_t NMethodSweeper::_total_flushed_size = 0; // Total number of bytes flushed from the code cache 162 Tickspan NMethodSweeper::_total_time_sweeping; // Accumulated time sweeping 163 Tickspan NMethodSweeper::_total_time_this_sweep; // Total time this sweep 164 Tickspan NMethodSweeper::_peak_sweep_time; // Peak time for a full sweep 165 Tickspan NMethodSweeper::_peak_sweep_fraction_time; // Peak time sweeping one fraction 166 167 Monitor* NMethodSweeper::_stat_lock = new Monitor(Mutex::special, "Sweeper::Statistics", true, Monitor::_safepoint_check_sometimes); 168 169 class MarkActivationClosure: public CodeBlobClosure { 170 public: 171 virtual void do_code_blob(CodeBlob* cb) { 172 assert(cb->is_nmethod(), "CodeBlob should be nmethod"); 173 nmethod* nm = (nmethod*)cb; 174 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val()); 175 // If we see an activation belonging to a non_entrant nmethod, we mark it. 176 if (nm->is_not_entrant()) { 177 nm->mark_as_seen_on_stack(); 178 } 179 } 180 }; 181 static MarkActivationClosure mark_activation_closure; 182 183 class SetHotnessClosure: public CodeBlobClosure { 184 public: 185 virtual void do_code_blob(CodeBlob* cb) { 186 assert(cb->is_nmethod(), "CodeBlob should be nmethod"); 187 nmethod* nm = (nmethod*)cb; 188 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val()); 189 } 190 }; 191 static SetHotnessClosure set_hotness_closure; 192 193 194 int NMethodSweeper::hotness_counter_reset_val() { 195 if (_hotness_counter_reset_val == 0) { 196 _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2; 197 } 198 return _hotness_counter_reset_val; 199 } 200 bool NMethodSweeper::wait_for_stack_scanning() { 201 return _current.end(); 202 } 203 204 class NMethodMarkingThreadClosure : public ThreadClosure { 205 private: 206 CodeBlobClosure* _cl; 207 public: 208 NMethodMarkingThreadClosure(CodeBlobClosure* cl) : _cl(cl) {} 209 void do_thread(Thread* thread) { 210 if (thread->is_Java_thread() && ! thread->is_Code_cache_sweeper_thread()) { 211 JavaThread* jt = (JavaThread*) thread; 212 jt->nmethods_do(_cl); 213 } 214 } 215 }; 216 217 class NMethodMarkingTask : public AbstractGangTask { 218 private: 219 NMethodMarkingThreadClosure* _cl; 220 public: 221 NMethodMarkingTask(NMethodMarkingThreadClosure* cl) : 222 AbstractGangTask("Parallel NMethod Marking"), 223 _cl(cl) { 224 Threads::change_thread_claim_parity(); 225 } 226 227 ~NMethodMarkingTask() { 228 Threads::assert_all_threads_claimed(); 229 } 230 231 void work(uint worker_id) { 232 Threads::possibly_parallel_threads_do(true, _cl); 233 } 234 }; 235 236 /** 237 * Scans the stacks of all Java threads and marks activations of not-entrant methods. 238 * No need to synchronize access, since 'mark_active_nmethods' is always executed at a 239 * safepoint. 240 */ 241 void NMethodSweeper::mark_active_nmethods() { 242 CodeBlobClosure* cl = prepare_mark_active_nmethods(); 243 if (cl != NULL) { 244 WorkGang* workers = Universe::heap()->get_safepoint_workers(); 245 if (workers != NULL) { 246 NMethodMarkingThreadClosure tcl(cl); 247 NMethodMarkingTask task(&tcl); 248 workers->run_task(&task); 249 } else { 250 Threads::nmethods_do(cl); 251 } 252 } 253 } 254 255 CodeBlobClosure* NMethodSweeper::prepare_mark_active_nmethods() { 256 #ifdef ASSERT 257 if (ThreadLocalHandshakes) { 258 assert(Thread::current()->is_Code_cache_sweeper_thread(), "must be executed under CodeCache_lock and in sweeper thread"); 259 assert_lock_strong(CodeCache_lock); 260 } else { 261 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint"); 262 } 263 #endif 264 265 // If we do not want to reclaim not-entrant or zombie methods there is no need 266 // to scan stacks 267 if (!MethodFlushing) { 268 return NULL; 269 } 270 271 // Increase time so that we can estimate when to invoke the sweeper again. 272 _time_counter++; 273 274 // Check for restart 275 assert(_current.method() == NULL, "should only happen between sweeper cycles"); 276 assert(wait_for_stack_scanning(), "should only happen between sweeper cycles"); 277 278 _seen = 0; 279 _current = CompiledMethodIterator(); 280 // Initialize to first nmethod 281 _current.next(); 282 _traversals += 1; 283 _total_time_this_sweep = Tickspan(); 284 285 if (PrintMethodFlushing) { 286 tty->print_cr("### Sweep: stack traversal %ld", _traversals); 287 } 288 return &mark_activation_closure; 289 } 290 291 CodeBlobClosure* NMethodSweeper::prepare_reset_hotness_counters() { 292 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint"); 293 294 // If we do not want to reclaim not-entrant or zombie methods there is no need 295 // to scan stacks 296 if (!MethodFlushing) { 297 return NULL; 298 } 299 300 // Increase time so that we can estimate when to invoke the sweeper again. 301 _time_counter++; 302 303 // Check for restart 304 if (_current.method() != NULL) { 305 if (_current.method()->is_nmethod()) { 306 assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid"); 307 } else if (_current.method()->is_aot()) { 308 assert(CodeCache::find_blob_unsafe(_current.method()->code_begin()) == _current.method(), "Sweeper AOT method cached state invalid"); 309 } else { 310 ShouldNotReachHere(); 311 } 312 } 313 314 return &set_hotness_closure; 315 } 316 317 /** 318 * This function triggers a VM operation that does stack scanning of active 319 * methods. Stack scanning is mandatory for the sweeper to make progress. 320 */ 321 void NMethodSweeper::do_stack_scanning() { 322 assert(!CodeCache_lock->owned_by_self(), "just checking"); 323 if (wait_for_stack_scanning()) { 324 if (ThreadLocalHandshakes) { 325 CodeBlobClosure* code_cl; 326 { 327 MutexLockerEx ccl(CodeCache_lock, Mutex::_no_safepoint_check_flag); 328 code_cl = prepare_mark_active_nmethods(); 329 } 330 if (code_cl != NULL) { 331 NMethodMarkingThreadClosure tcl(code_cl); 332 Handshake::execute(&tcl); 333 } 334 } else { 335 VM_MarkActiveNMethods op; 336 VMThread::execute(&op); 337 } 338 } 339 } 340 341 void NMethodSweeper::sweeper_loop() { 342 bool timeout; 343 while (true) { 344 { 345 ThreadBlockInVM tbivm(JavaThread::current()); 346 MutexLockerEx waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag); 347 const long wait_time = 60*60*24 * 1000; 348 timeout = CodeCache_lock->wait(Mutex::_no_safepoint_check_flag, wait_time); 349 } 350 if (!timeout) { 351 possibly_sweep(); 352 } 353 } 354 } 355 356 /** 357 * Wakes up the sweeper thread to possibly sweep. 358 */ 359 void NMethodSweeper::notify(int code_blob_type) { 360 // Makes sure that we do not invoke the sweeper too often during startup. 361 double start_threshold = 100.0 / (double)StartAggressiveSweepingAt; 362 double aggressive_sweep_threshold = MIN2(start_threshold, 1.1); 363 if (CodeCache::reverse_free_ratio(code_blob_type) >= aggressive_sweep_threshold) { 364 assert_locked_or_safepoint(CodeCache_lock); 365 CodeCache_lock->notify(); 366 } 367 } 368 369 /** 370 * Wakes up the sweeper thread and forces a sweep. Blocks until it finished. 371 */ 372 void NMethodSweeper::force_sweep() { 373 ThreadBlockInVM tbivm(JavaThread::current()); 374 MutexLockerEx waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag); 375 // Request forced sweep 376 _force_sweep = true; 377 while (_force_sweep) { 378 // Notify sweeper that we want to force a sweep and wait for completion. 379 // In case a sweep currently takes place we timeout and try again because 380 // we want to enforce a full sweep. 381 CodeCache_lock->notify(); 382 CodeCache_lock->wait(Mutex::_no_safepoint_check_flag, 1000); 383 } 384 } 385 386 /** 387 * Handle a safepoint request 388 */ 389 void NMethodSweeper::handle_safepoint_request() { 390 if (SafepointSynchronize::is_synchronizing()) { 391 if (PrintMethodFlushing && Verbose) { 392 tty->print_cr("### Sweep at %d out of %d, yielding to safepoint", _seen, CodeCache::nmethod_count()); 393 } 394 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 395 396 JavaThread* thread = JavaThread::current(); 397 ThreadBlockInVM tbivm(thread); 398 thread->java_suspend_self(); 399 } 400 } 401 402 /** 403 * This function invokes the sweeper if at least one of the three conditions is met: 404 * (1) The code cache is getting full 405 * (2) There are sufficient state changes in/since the last sweep. 406 * (3) We have not been sweeping for 'some time' 407 */ 408 void NMethodSweeper::possibly_sweep() { 409 assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode"); 410 // If there was no state change while nmethod sweeping, 'should_sweep' will be false. 411 // This is one of the two places where should_sweep can be set to true. The general 412 // idea is as follows: If there is enough free space in the code cache, there is no 413 // need to invoke the sweeper. The following formula (which determines whether to invoke 414 // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes 415 // we need less frequent sweeps than for smaller ReservedCodecCacheSizes. Furthermore, 416 // the formula considers how much space in the code cache is currently used. Here are 417 // some examples that will (hopefully) help in understanding. 418 // 419 // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since 420 // the result of the division is 0. This 421 // keeps the used code cache size small 422 // (important for embedded Java) 423 // Large ReservedCodeCacheSize : (e.g., 256M + code cache is 10% full). The formula 424 // computes: (256 / 16) - 1 = 15 425 // As a result, we invoke the sweeper after 426 // 15 invocations of 'mark_active_nmethods. 427 // Large ReservedCodeCacheSize: (e.g., 256M + code Cache is 90% full). The formula 428 // computes: (256 / 16) - 10 = 6. 429 if (!_should_sweep) { 430 const int time_since_last_sweep = _time_counter - _last_sweep; 431 // ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time, 432 // since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens using 433 // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive 434 // value) that disables the intended periodic sweeps. 435 const int max_wait_time = ReservedCodeCacheSize / (16 * M); 436 double wait_until_next_sweep = max_wait_time - time_since_last_sweep - 437 MAX2(CodeCache::reverse_free_ratio(CodeBlobType::MethodProfiled), 438 CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled)); 439 assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect"); 440 441 if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) { 442 _should_sweep = true; 443 } 444 } 445 446 // Remember if this was a forced sweep 447 bool forced = _force_sweep; 448 449 // Force stack scanning if there is only 10% free space in the code cache. 450 // We force stack scanning only if the non-profiled code heap gets full, since critical 451 // allocations go to the non-profiled heap and we must be make sure that there is 452 // enough space. 453 double free_percent = 1 / CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled) * 100; 454 if (free_percent <= StartAggressiveSweepingAt || forced || _should_sweep) { 455 do_stack_scanning(); 456 } 457 458 if (_should_sweep || forced) { 459 init_sweeper_log(); 460 sweep_code_cache(); 461 } 462 463 // We are done with sweeping the code cache once. 464 _total_nof_code_cache_sweeps++; 465 _last_sweep = _time_counter; 466 // Reset flag; temporarily disables sweeper 467 _should_sweep = false; 468 // If there was enough state change, 'possibly_enable_sweeper()' 469 // sets '_should_sweep' to true 470 possibly_enable_sweeper(); 471 // Reset _bytes_changed only if there was enough state change. _bytes_changed 472 // can further increase by calls to 'report_state_change'. 473 if (_should_sweep) { 474 _bytes_changed = 0; 475 } 476 477 if (forced) { 478 // Notify requester that forced sweep finished 479 assert(_force_sweep, "Should be a forced sweep"); 480 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 481 _force_sweep = false; 482 CodeCache_lock->notify(); 483 } 484 } 485 486 static void post_sweep_event(EventSweepCodeCache* event, 487 const Ticks& start, 488 const Ticks& end, 489 s4 traversals, 490 int swept, 491 int flushed, 492 int zombified) { 493 assert(event != NULL, "invariant"); 494 assert(event->should_commit(), "invariant"); 495 event->set_starttime(start); 496 event->set_endtime(end); 497 event->set_sweepId(traversals); 498 event->set_sweptCount(swept); 499 event->set_flushedCount(flushed); 500 event->set_zombifiedCount(zombified); 501 event->commit(); 502 } 503 504 void NMethodSweeper::sweep_code_cache() { 505 ResourceMark rm; 506 Ticks sweep_start_counter = Ticks::now(); 507 508 log_debug(codecache, sweep, start)("CodeCache flushing"); 509 510 int flushed_count = 0; 511 int zombified_count = 0; 512 int flushed_c2_count = 0; 513 514 if (PrintMethodFlushing && Verbose) { 515 tty->print_cr("### Sweep at %d out of %d", _seen, CodeCache::nmethod_count()); 516 } 517 518 int swept_count = 0; 519 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here"); 520 assert(!CodeCache_lock->owned_by_self(), "just checking"); 521 522 int freed_memory = 0; 523 { 524 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 525 526 while (!_current.end()) { 527 swept_count++; 528 // Since we will give up the CodeCache_lock, always skip ahead 529 // to the next nmethod. Other blobs can be deleted by other 530 // threads but nmethods are only reclaimed by the sweeper. 531 CompiledMethod* nm = _current.method(); 532 _current.next(); 533 534 // Now ready to process nmethod and give up CodeCache_lock 535 { 536 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 537 // Save information before potentially flushing the nmethod 538 // Only flushing nmethods so size only matters for them. 539 int size = nm->is_nmethod() ? ((nmethod*)nm)->total_size() : 0; 540 bool is_c2_method = nm->is_compiled_by_c2(); 541 bool is_osr = nm->is_osr_method(); 542 int compile_id = nm->compile_id(); 543 intptr_t address = p2i(nm); 544 const char* state_before = nm->state(); 545 const char* state_after = ""; 546 547 MethodStateChange type = process_compiled_method(nm); 548 switch (type) { 549 case Flushed: 550 state_after = "flushed"; 551 freed_memory += size; 552 ++flushed_count; 553 if (is_c2_method) { 554 ++flushed_c2_count; 555 } 556 break; 557 case MadeZombie: 558 state_after = "made zombie"; 559 ++zombified_count; 560 break; 561 case None: 562 break; 563 default: 564 ShouldNotReachHere(); 565 } 566 if (PrintMethodFlushing && Verbose && type != None) { 567 tty->print_cr("### %s nmethod %3d/" PTR_FORMAT " (%s) %s", is_osr ? "osr" : "", compile_id, address, state_before, state_after); 568 } 569 } 570 571 _seen++; 572 handle_safepoint_request(); 573 } 574 } 575 576 assert(_current.end(), "must have scanned the whole cache"); 577 578 const Ticks sweep_end_counter = Ticks::now(); 579 const Tickspan sweep_time = sweep_end_counter - sweep_start_counter; 580 { 581 MutexLockerEx mu(_stat_lock, Mutex::_no_safepoint_check_flag); 582 _total_time_sweeping += sweep_time; 583 _total_time_this_sweep += sweep_time; 584 _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time); 585 _total_flushed_size += freed_memory; 586 _total_nof_methods_reclaimed += flushed_count; 587 _total_nof_c2_methods_reclaimed += flushed_c2_count; 588 _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep); 589 } 590 591 EventSweepCodeCache event(UNTIMED); 592 if (event.should_commit()) { 593 post_sweep_event(&event, sweep_start_counter, sweep_end_counter, (s4)_traversals, swept_count, flushed_count, zombified_count); 594 } 595 596 #ifdef ASSERT 597 if(PrintMethodFlushing) { 598 tty->print_cr("### sweeper: sweep time(" JLONG_FORMAT "): ", sweep_time.value()); 599 } 600 #endif 601 602 Log(codecache, sweep) log; 603 if (log.is_debug()) { 604 LogStream ls(log.debug()); 605 CodeCache::print_summary(&ls, false); 606 } 607 log_sweep("finished"); 608 609 // Sweeper is the only case where memory is released, check here if it 610 // is time to restart the compiler. Only checking if there is a certain 611 // amount of free memory in the code cache might lead to re-enabling 612 // compilation although no memory has been released. For example, there are 613 // cases when compilation was disabled although there is 4MB (or more) free 614 // memory in the code cache. The reason is code cache fragmentation. Therefore, 615 // it only makes sense to re-enable compilation if we have actually freed memory. 616 // Note that typically several kB are released for sweeping 16MB of the code 617 // cache. As a result, 'freed_memory' > 0 to restart the compiler. 618 if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) { 619 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); 620 log.debug("restart compiler"); 621 log_sweep("restart_compiler"); 622 } 623 } 624 625 /** 626 * This function updates the sweeper statistics that keep track of nmethods 627 * state changes. If there is 'enough' state change, the sweeper is invoked 628 * as soon as possible. There can be data races on _bytes_changed. The data 629 * races are benign, since it does not matter if we loose a couple of bytes. 630 * In the worst case we call the sweeper a little later. Also, we are guaranteed 631 * to invoke the sweeper if the code cache gets full. 632 */ 633 void NMethodSweeper::report_state_change(nmethod* nm) { 634 _bytes_changed += nm->total_size(); 635 possibly_enable_sweeper(); 636 } 637 638 /** 639 * Function determines if there was 'enough' state change in the code cache to invoke 640 * the sweeper again. Currently, we determine 'enough' as more than 1% state change in 641 * the code cache since the last sweep. 642 */ 643 void NMethodSweeper::possibly_enable_sweeper() { 644 double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100; 645 if (percent_changed > 1.0) { 646 _should_sweep = true; 647 } 648 } 649 650 class CompiledMethodMarker: public StackObj { 651 private: 652 CodeCacheSweeperThread* _thread; 653 public: 654 CompiledMethodMarker(CompiledMethod* cm) { 655 JavaThread* current = JavaThread::current(); 656 assert (current->is_Code_cache_sweeper_thread(), "Must be"); 657 _thread = (CodeCacheSweeperThread*)current; 658 if (!cm->is_zombie() && !cm->is_unloaded()) { 659 // Only expose live nmethods for scanning 660 _thread->set_scanned_compiled_method(cm); 661 } 662 } 663 ~CompiledMethodMarker() { 664 _thread->set_scanned_compiled_method(NULL); 665 } 666 }; 667 668 void NMethodSweeper::release_compiled_method(CompiledMethod* nm) { 669 // Make sure the released nmethod is no longer referenced by the sweeper thread 670 CodeCacheSweeperThread* thread = (CodeCacheSweeperThread*)JavaThread::current(); 671 thread->set_scanned_compiled_method(NULL); 672 673 // Clean up any CompiledICHolders 674 { 675 ResourceMark rm; 676 MutexLocker ml_patch(CompiledIC_lock); 677 RelocIterator iter(nm); 678 while (iter.next()) { 679 if (iter.type() == relocInfo::virtual_call_type) { 680 CompiledIC::cleanup_call_site(iter.virtual_call_reloc(), nm); 681 } 682 } 683 } 684 685 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 686 nm->flush(); 687 } 688 689 NMethodSweeper::MethodStateChange NMethodSweeper::process_compiled_method(CompiledMethod* cm) { 690 assert(cm != NULL, "sanity"); 691 assert(!CodeCache_lock->owned_by_self(), "just checking"); 692 693 MethodStateChange result = None; 694 // Make sure this nmethod doesn't get unloaded during the scan, 695 // since safepoints may happen during acquired below locks. 696 CompiledMethodMarker nmm(cm); 697 SWEEP(cm); 698 699 // Skip methods that are currently referenced by the VM 700 if (cm->is_locked_by_vm()) { 701 // But still remember to clean-up inline caches for alive nmethods 702 if (cm->is_alive()) { 703 // Clean inline caches that point to zombie/non-entrant/unloaded nmethods 704 MutexLocker cl(CompiledIC_lock); 705 cm->cleanup_inline_caches(); 706 SWEEP(cm); 707 } 708 return result; 709 } 710 711 if (cm->is_zombie()) { 712 // All inline caches that referred to this nmethod were cleaned in the 713 // previous sweeper cycle. Now flush the nmethod from the code cache. 714 assert(!cm->is_locked_by_vm(), "must not flush locked Compiled Methods"); 715 release_compiled_method(cm); 716 assert(result == None, "sanity"); 717 result = Flushed; 718 } else if (cm->is_not_entrant()) { 719 // If there are no current activations of this method on the 720 // stack we can safely convert it to a zombie method 721 OrderAccess::loadload(); // _stack_traversal_mark and _state 722 if (cm->can_convert_to_zombie()) { 723 // Clear ICStubs to prevent back patching stubs of zombie or flushed 724 // nmethods during the next safepoint (see ICStub::finalize). 725 { 726 MutexLocker cl(CompiledIC_lock); 727 cm->clear_ic_stubs(); 728 } 729 // Code cache state change is tracked in make_zombie() 730 cm->make_zombie(); 731 SWEEP(cm); 732 // The nmethod may have been locked by JVMTI after being made zombie (see 733 // JvmtiDeferredEvent::compiled_method_unload_event()). If so, we cannot 734 // flush the osr nmethod directly but have to wait for a later sweeper cycle. 735 if (cm->is_osr_method() && !cm->is_locked_by_vm()) { 736 // No inline caches will ever point to osr methods, so we can just remove it. 737 // Make sure that we unregistered the nmethod with the heap and flushed all 738 // dependencies before removing the nmethod (done in make_zombie()). 739 assert(cm->is_zombie(), "nmethod must be unregistered"); 740 release_compiled_method(cm); 741 assert(result == None, "sanity"); 742 result = Flushed; 743 } else { 744 assert(result == None, "sanity"); 745 result = MadeZombie; 746 assert(cm->is_zombie(), "nmethod must be zombie"); 747 } 748 } else { 749 // Still alive, clean up its inline caches 750 MutexLocker cl(CompiledIC_lock); 751 cm->cleanup_inline_caches(); 752 SWEEP(cm); 753 } 754 } else if (cm->is_unloaded()) { 755 // Code is unloaded, so there are no activations on the stack. 756 // Convert the nmethod to zombie or flush it directly in the OSR case. 757 { 758 // Clean ICs of unloaded nmethods as well because they may reference other 759 // unloaded nmethods that may be flushed earlier in the sweeper cycle. 760 MutexLocker cl(CompiledIC_lock); 761 cm->cleanup_inline_caches(); 762 } 763 if (cm->is_osr_method()) { 764 SWEEP(cm); 765 // No inline caches will ever point to osr methods, so we can just remove it 766 release_compiled_method(cm); 767 assert(result == None, "sanity"); 768 result = Flushed; 769 } else { 770 // Code cache state change is tracked in make_zombie() 771 cm->make_zombie(); 772 SWEEP(cm); 773 assert(result == None, "sanity"); 774 result = MadeZombie; 775 } 776 } else { 777 if (cm->is_nmethod()) { 778 possibly_flush((nmethod*)cm); 779 } 780 // Clean inline caches that point to zombie/non-entrant/unloaded nmethods 781 MutexLocker cl(CompiledIC_lock); 782 cm->cleanup_inline_caches(); 783 SWEEP(cm); 784 } 785 return result; 786 } 787 788 789 void NMethodSweeper::possibly_flush(nmethod* nm) { 790 if (UseCodeCacheFlushing) { 791 if (!nm->is_locked_by_vm() && !nm->is_native_method() && !nm->is_not_installed()) { 792 bool make_not_entrant = false; 793 794 // Do not make native methods not-entrant 795 nm->dec_hotness_counter(); 796 // Get the initial value of the hotness counter. This value depends on the 797 // ReservedCodeCacheSize 798 int reset_val = hotness_counter_reset_val(); 799 int time_since_reset = reset_val - nm->hotness_counter(); 800 int code_blob_type = CodeCache::get_code_blob_type(nm); 801 double threshold = -reset_val + (CodeCache::reverse_free_ratio(code_blob_type) * NmethodSweepActivity); 802 // The less free space in the code cache we have - the bigger reverse_free_ratio() is. 803 // I.e., 'threshold' increases with lower available space in the code cache and a higher 804 // NmethodSweepActivity. If the current hotness counter - which decreases from its initial 805 // value until it is reset by stack walking - is smaller than the computed threshold, the 806 // corresponding nmethod is considered for removal. 807 if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > MinPassesBeforeFlush)) { 808 // A method is marked as not-entrant if the method is 809 // 1) 'old enough': nm->hotness_counter() < threshold 810 // 2) The method was in_use for a minimum amount of time: (time_since_reset > MinPassesBeforeFlush) 811 // The second condition is necessary if we are dealing with very small code cache 812 // sizes (e.g., <10m) and the code cache size is too small to hold all hot methods. 813 // The second condition ensures that methods are not immediately made not-entrant 814 // after compilation. 815 make_not_entrant = true; 816 } 817 818 // The stack-scanning low-cost detection may not see the method was used (which can happen for 819 // flat profiles). Check the age counter for possible data. 820 if (UseCodeAging && make_not_entrant && (nm->is_compiled_by_c2() || nm->is_compiled_by_c1())) { 821 MethodCounters* mc = nm->method()->get_method_counters(Thread::current()); 822 if (mc != NULL) { 823 // Snapshot the value as it's changed concurrently 824 int age = mc->nmethod_age(); 825 if (MethodCounters::is_nmethod_hot(age)) { 826 // The method has gone through flushing, and it became relatively hot that it deopted 827 // before we could take a look at it. Give it more time to appear in the stack traces, 828 // proportional to the number of deopts. 829 MethodData* md = nm->method()->method_data(); 830 if (md != NULL && time_since_reset > (int)(MinPassesBeforeFlush * (md->tenure_traps() + 1))) { 831 // It's been long enough, we still haven't seen it on stack. 832 // Try to flush it, but enable counters the next time. 833 mc->reset_nmethod_age(); 834 } else { 835 make_not_entrant = false; 836 } 837 } else if (MethodCounters::is_nmethod_warm(age)) { 838 // Method has counters enabled, and the method was used within 839 // previous MinPassesBeforeFlush sweeps. Reset the counter. Stay in the existing 840 // compiled state. 841 mc->reset_nmethod_age(); 842 // delay the next check 843 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val()); 844 make_not_entrant = false; 845 } else if (MethodCounters::is_nmethod_age_unset(age)) { 846 // No counters were used before. Set the counters to the detection 847 // limit value. If the method is going to be used again it will be compiled 848 // with counters that we're going to use for analysis the the next time. 849 mc->reset_nmethod_age(); 850 } else { 851 // Method was totally idle for 10 sweeps 852 // The counter already has the initial value, flush it and may be recompile 853 // later with counters 854 } 855 } 856 } 857 858 if (make_not_entrant) { 859 nm->make_not_entrant(); 860 861 // Code cache state change is tracked in make_not_entrant() 862 if (PrintMethodFlushing && Verbose) { 863 tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f", 864 nm->compile_id(), p2i(nm), nm->hotness_counter(), reset_val, threshold); 865 } 866 } 867 } 868 } 869 } 870 871 // Print out some state information about the current sweep and the 872 // state of the code cache if it's requested. 873 void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) { 874 if (PrintMethodFlushing) { 875 ResourceMark rm; 876 stringStream s; 877 // Dump code cache state into a buffer before locking the tty, 878 // because log_state() will use locks causing lock conflicts. 879 CodeCache::log_state(&s); 880 881 ttyLocker ttyl; 882 tty->print("### sweeper: %s ", msg); 883 if (format != NULL) { 884 va_list ap; 885 va_start(ap, format); 886 tty->vprint(format, ap); 887 va_end(ap); 888 } 889 tty->print_cr("%s", s.as_string()); 890 } 891 892 if (LogCompilation && (xtty != NULL)) { 893 ResourceMark rm; 894 stringStream s; 895 // Dump code cache state into a buffer before locking the tty, 896 // because log_state() will use locks causing lock conflicts. 897 CodeCache::log_state(&s); 898 899 ttyLocker ttyl; 900 xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count()); 901 if (format != NULL) { 902 va_list ap; 903 va_start(ap, format); 904 xtty->vprint(format, ap); 905 va_end(ap); 906 } 907 xtty->print("%s", s.as_string()); 908 xtty->stamp(); 909 xtty->end_elem(); 910 } 911 } 912 913 void NMethodSweeper::print(outputStream* out) { 914 ttyLocker ttyl; 915 out = (out == NULL) ? tty : out; 916 out->print_cr("Code cache sweeper statistics:"); 917 out->print_cr(" Total sweep time: %1.0lf ms", (double)_total_time_sweeping.value()/1000000); 918 out->print_cr(" Total number of full sweeps: %ld", _total_nof_code_cache_sweeps); 919 out->print_cr(" Total number of flushed methods: %ld (thereof %ld C2 methods)", _total_nof_methods_reclaimed, 920 _total_nof_c2_methods_reclaimed); 921 out->print_cr(" Total size of flushed methods: " SIZE_FORMAT " kB", _total_flushed_size/K); 922 }