Print this page
Split |
Close |
Expand all |
Collapse all |
--- old/src/share/vm/runtime/sweeper.cpp
+++ new/src/share/vm/runtime/sweeper.cpp
1 1 /*
2 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "code/codeCache.hpp"
27 27 #include "code/nmethod.hpp"
28 28 #include "compiler/compileBroker.hpp"
29 29 #include "memory/resourceArea.hpp"
30 30 #include "oops/methodOop.hpp"
31 31 #include "runtime/atomic.hpp"
32 32 #include "runtime/compilationPolicy.hpp"
33 33 #include "runtime/mutexLocker.hpp"
34 34 #include "runtime/os.hpp"
35 35 #include "runtime/sweeper.hpp"
36 36 #include "runtime/vm_operations.hpp"
37 37 #include "utilities/events.hpp"
38 38 #include "utilities/xmlstream.hpp"
39 39
40 40 #ifdef ASSERT
41 41
42 42 #define SWEEP(nm) record_sweep(nm, __LINE__)
43 43 // Sweeper logging code
44 44 class SweeperRecord {
45 45 public:
46 46 int traversal;
47 47 int invocation;
48 48 int compile_id;
49 49 long traversal_mark;
50 50 int state;
51 51 const char* kind;
52 52 address vep;
53 53 address uep;
54 54 int line;
55 55
56 56 void print() {
57 57 tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
58 58 PTR_FORMAT " state = %d traversal_mark %d line = %d",
59 59 traversal,
60 60 invocation,
61 61 compile_id,
62 62 kind == NULL ? "" : kind,
63 63 uep,
64 64 vep,
65 65 state,
66 66 traversal_mark,
67 67 line);
68 68 }
69 69 };
70 70
static int _sweep_index = 0;            // next free slot in the ring buffer (wraps at SweeperLogEntries)
static SweeperRecord* _records = NULL;  // ring buffer of sweep records; NULL until allocated under -XX:+LogSweeper
73 73
74 74 void NMethodSweeper::report_events(int id, address entry) {
75 75 if (_records != NULL) {
76 76 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
77 77 if (_records[i].uep == entry ||
78 78 _records[i].vep == entry ||
79 79 _records[i].compile_id == id) {
80 80 _records[i].print();
81 81 }
82 82 }
83 83 for (int i = 0; i < _sweep_index; i++) {
84 84 if (_records[i].uep == entry ||
85 85 _records[i].vep == entry ||
86 86 _records[i].compile_id == id) {
87 87 _records[i].print();
88 88 }
89 89 }
90 90 }
91 91 }
92 92
93 93 void NMethodSweeper::report_events() {
94 94 if (_records != NULL) {
95 95 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
96 96 // skip empty records
97 97 if (_records[i].vep == NULL) continue;
98 98 _records[i].print();
99 99 }
100 100 for (int i = 0; i < _sweep_index; i++) {
101 101 // skip empty records
102 102 if (_records[i].vep == NULL) continue;
103 103 _records[i].print();
104 104 }
105 105 }
106 106 }
107 107
// Append one entry to the sweep-logging ring buffer describing the state of
// nmethod 'nm' at source line 'line'. No-op unless the buffer was allocated
// (i.e. -XX:+LogSweeper; see possibly_sweep()).
void NMethodSweeper::record_sweep(nmethod* nm, int line) {
  if (_records != NULL) {
    _records[_sweep_index].traversal = _traversals;  // note: narrows long -> int
    _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
    _records[_sweep_index].invocation = _invocations;
    _records[_sweep_index].compile_id = nm->compile_id();
    _records[_sweep_index].kind = nm->compile_kind();
    _records[_sweep_index].state = nm->_state;
    _records[_sweep_index].vep = nm->verified_entry_point();
    _records[_sweep_index].uep = nm->entry_point();
    _records[_sweep_index].line = line;

    // Advance and wrap so the buffer keeps the most recent SweeperLogEntries.
    _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
  }
}
123 123 #else
124 124 #define SWEEP(nm)
125 125 #endif
126 126
127 127
long NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
int  NMethodSweeper::_seen = 0 ;        // No. of nmethods we have currently processed in current pass of CodeCache
int  NMethodSweeper::_invocations = 0;  // No. of invocations of the sweeper

volatile int NMethodSweeper::_sweep_started = 0;   // Whether a sweep is in progress.
// NOTE(review): 'volatile nmethod*' qualifies the pointee, not the pointer.
// If the intent is a pointer that is updated by concurrently sweeping
// threads, this should read 'nmethod* volatile' (and must match the
// declaration in sweeper.hpp) -- TODO confirm.
volatile nmethod* NMethodSweeper::_current = NULL; // Current nmethod being swept

jint NMethodSweeper::_locked_seen = 0;                // VM-locked nmethods seen in the current sweep
jint NMethodSweeper::_not_entrant_seen_on_stack = 0;  // not-entrant nmethods still active on some stack
bool NMethodSweeper::_rescan = false;                 // a state transition happened; another full cycle is wanted
bool NMethodSweeper::_do_sweep = false;               // sweeping enabled for this cycle (set in scan_stacks)
bool NMethodSweeper::_was_full = false;               // code cache was recently full
jint NMethodSweeper::_advise_to_sweep = 0;            // CAS flag: a compiler advised an early cleanup
jlong NMethodSweeper::_last_was_full = 0;             // timestamp (ms) when the cache was last full
uint NMethodSweeper::_highest_marked = 0;             // highest compile id marked for speculative disconnect
long NMethodSweeper::_was_full_traversal = 0;         // _traversals value at the last full-cache cleanup
144 144
145 145 class MarkActivationClosure: public CodeBlobClosure {
146 146 public:
147 147 virtual void do_code_blob(CodeBlob* cb) {
148 148 // If we see an activation belonging to a non_entrant nmethod, we mark it.
149 149 if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
150 150 ((nmethod*)cb)->mark_as_seen_on_stack();
151 151 }
152 152 }
153 153 };
154 154 static MarkActivationClosure mark_activation_closure;
155 155
156 156 void NMethodSweeper::scan_stacks() {
157 157 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
158 158 if (!MethodFlushing) return;
159 159 _do_sweep = true;
160 160
161 161 // No need to synchronize access, since this is always executed at a
162 162 // safepoint. If we aren't in the middle of scan and a rescan
163 163 // hasn't been requested then just return. If UseCodeCacheFlushing is on and
164 164 // code cache flushing is in progress, don't skip sweeping to help make progress
165 165 // clearing space in the code cache.
166 166 if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
167 167 _do_sweep = false;
168 168 return;
↓ open down ↓ |
25 lines elided |
↑ open up ↑ |
169 169 }
170 170
171 171 // Make sure CompiledIC_lock in unlocked, since we might update some
172 172 // inline caches. If it is, we just bail-out and try later.
173 173 if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
174 174
175 175 // Check for restart
176 176 assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
177 177 if (_current == NULL) {
178 178 _seen = 0;
179 - _invocations = NmethodSweepFraction;
179 + _invocations = 0;
180 180 _current = CodeCache::first_nmethod();
181 181 _traversals += 1;
182 182 if (PrintMethodFlushing) {
183 183 tty->print_cr("### Sweep: stack traversal %d", _traversals);
184 184 }
185 185 Threads::nmethods_do(&mark_activation_closure);
186 186
187 187 // reset the flags since we started a scan from the beginning.
188 188 _rescan = false;
189 189 _locked_seen = 0;
190 190 _not_entrant_seen_on_stack = 0;
191 191 }
192 192
193 193 if (UseCodeCacheFlushing) {
194 194 if (!CodeCache::needs_flushing()) {
195 195 // scan_stacks() runs during a safepoint, no race with setters
196 196 _advise_to_sweep = 0;
197 197 }
198 198
199 199 if (was_full()) {
200 200 // There was some progress so attempt to restart the compiler
201 201 jlong now = os::javaTimeMillis();
202 202 jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
203 203 jlong curr_interval = now - _last_was_full;
204 204 if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
205 205 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
206 206 set_was_full(false);
207 207
208 208 // Update the _last_was_full time so we can tell how fast the
209 209 // code cache is filling up
210 210 _last_was_full = os::javaTimeMillis();
211 211
↓ open down ↓ |
22 lines elided |
↑ open up ↑ |
212 212 log_sweep("restart_compiler");
213 213 }
214 214 }
215 215 }
216 216 }
217 217
218 218 void NMethodSweeper::possibly_sweep() {
219 219 assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
220 220 if ((!MethodFlushing) || (!_do_sweep)) return;
221 221
222 - if (_invocations > 0) {
222 + if (_current != NULL) {
223 223 // Only one thread at a time will sweep
224 224 jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
225 225 if (old != 0) {
226 226 return;
227 227 }
228 228 #ifdef ASSERT
229 229 if (LogSweeper && _records == NULL) {
230 230 // Create the ring buffer for the logging code
231 231 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries);
232 232 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
233 233 }
234 234 #endif
235 - if (_invocations > 0) {
235 + if (_current != NULL) {
236 236 sweep_code_cache();
237 - _invocations--;
238 237 }
238 + _invocations++;
239 239 _sweep_started = 0;
240 240 }
241 241 }
242 242
243 -void NMethodSweeper::sweep_code_cache() {
243 +void NMethodSweeper::sweep_code_cache(void) {
244 244 #ifdef ASSERT
245 245 jlong sweep_start;
246 246 if (PrintMethodFlushing) {
247 247 sweep_start = os::javaTimeMillis();
248 248 }
249 249 #endif
250 250 if (PrintMethodFlushing && Verbose) {
251 - tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
251 + tty->print_cr("### Sweep at %d out of %d, invocations %d", _seen, CodeCache::nof_nmethods(), _invocations);
252 252 }
253 253
254 - // We want to visit all nmethods after NmethodSweepFraction
255 - // invocations so divide the remaining number of nmethods by the
256 - // remaining number of invocations. This is only an estimate since
257 - // the number of nmethods changes during the sweep so the final
258 - // stage must iterate until it there are no more nmethods.
259 - int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
260 -
261 254 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
262 255 assert(!CodeCache_lock->owned_by_self(), "just checking");
263 256
264 257 {
265 258 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
266 259
267 260 // The last invocation iterates until there are no more nmethods
268 - for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
261 + for (int i = 0; (i < NMethodSweepLimit || CompileTheWorld) && _current != NULL; i++) {
262 + if (!CompileTheWorld && SafepointSynchronize::is_synchronizing()) { // Safepoint request
263 + if (PrintMethodFlushing && Verbose) {
264 + tty->print_cr("### Sweep at %d out of %d, safepoint", _seen, CodeCache::nof_nmethods());
265 + }
266 + MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
269 267
268 + assert(Thread::current()->is_Java_thread(), "should be java thread");
269 + JavaThread* thread = (JavaThread*)Thread::current();
270 + ThreadBlockInVM tbivm(thread);
271 + thread->java_suspend_self();
272 + }
270 273 // Since we will give up the CodeCache_lock, always skip ahead
271 274 // to the next nmethod. Other blobs can be deleted by other
272 275 // threads but nmethods are only reclaimed by the sweeper.
273 276 nmethod* next = CodeCache::next_nmethod(_current);
274 277
275 278 // Now ready to process nmethod and give up CodeCache_lock
276 279 {
277 280 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
278 281 process_nmethod(_current);
279 282 }
280 283 _seen++;
281 284 _current = next;
282 285 }
283 286 }
284 287
285 - assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
286 -
287 288 if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
288 289 // we've completed a scan without making progress but there were
289 290 // nmethods we were unable to process either because they were
290 291 // locked or were still on stack. We don't have to aggresively
291 292 // clean them up so just stop scanning. We could scan once more
292 293 // but that complicates the control logic and it's unlikely to
293 294 // matter much.
294 295 if (PrintMethodFlushing) {
295 296 tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
296 297 }
297 298 }
298 299
299 300 #ifdef ASSERT
300 301 if(PrintMethodFlushing) {
301 302 jlong sweep_end = os::javaTimeMillis();
302 303 tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
303 304 }
304 305 #endif
305 306
306 - if (_invocations == 1) {
307 + if (_current == NULL) {
307 308 log_sweep("finished");
308 309 }
309 310 }
310 311
311 312 class NMethodMarker: public StackObj {
312 313 private:
313 314 CompilerThread* _thread;
314 315 public:
315 316 NMethodMarker(nmethod* nm) {
316 317 _thread = CompilerThread::current();
317 318 _thread->set_scanned_nmethod(nm);
318 319 }
319 320 ~NMethodMarker() {
320 321 _thread->set_scanned_nmethod(NULL);
321 322 }
322 323 };
323 324
324 325
325 326 void NMethodSweeper::process_nmethod(nmethod *nm) {
326 327 assert(!CodeCache_lock->owned_by_self(), "just checking");
327 328
328 329 // Make sure this nmethod doesn't get unloaded during the scan,
329 330 // since the locks acquired below might safepoint.
330 331 NMethodMarker nmm(nm);
331 332
332 333 SWEEP(nm);
333 334
334 335 // Skip methods that are currently referenced by the VM
335 336 if (nm->is_locked_by_vm()) {
336 337 // But still remember to clean-up inline caches for alive nmethods
337 338 if (nm->is_alive()) {
338 339 // Clean-up all inline caches that points to zombie/non-reentrant methods
339 340 MutexLocker cl(CompiledIC_lock);
340 341 nm->cleanup_inline_caches();
341 342 SWEEP(nm);
342 343 } else {
343 344 _locked_seen++;
344 345 SWEEP(nm);
345 346 }
346 347 return;
347 348 }
348 349
349 350 if (nm->is_zombie()) {
350 351 // If it is first time, we see nmethod then we mark it. Otherwise,
351 352 // we reclame it. When we have seen a zombie method twice, we know that
352 353 // there are no inline caches that refer to it.
353 354 if (nm->is_marked_for_reclamation()) {
354 355 assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
355 356 if (PrintMethodFlushing && Verbose) {
356 357 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
357 358 }
358 359 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
359 360 nm->flush();
360 361 } else {
361 362 if (PrintMethodFlushing && Verbose) {
362 363 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
363 364 }
364 365 nm->mark_for_reclamation();
365 366 _rescan = true;
366 367 SWEEP(nm);
367 368 }
368 369 } else if (nm->is_not_entrant()) {
369 370 // If there is no current activations of this method on the
370 371 // stack we can safely convert it to a zombie method
371 372 if (nm->can_not_entrant_be_converted()) {
372 373 if (PrintMethodFlushing && Verbose) {
373 374 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
374 375 }
375 376 nm->make_zombie();
376 377 _rescan = true;
377 378 SWEEP(nm);
378 379 } else {
379 380 // Still alive, clean up its inline caches
380 381 MutexLocker cl(CompiledIC_lock);
381 382 nm->cleanup_inline_caches();
382 383 // we coudn't transition this nmethod so don't immediately
383 384 // request a rescan. If this method stays on the stack for a
384 385 // long time we don't want to keep rescanning the code cache.
385 386 _not_entrant_seen_on_stack++;
386 387 SWEEP(nm);
387 388 }
388 389 } else if (nm->is_unloaded()) {
389 390 // Unloaded code, just make it a zombie
390 391 if (PrintMethodFlushing && Verbose)
391 392 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
392 393 if (nm->is_osr_method()) {
393 394 // No inline caches will ever point to osr methods, so we can just remove it
394 395 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
395 396 SWEEP(nm);
396 397 nm->flush();
397 398 } else {
398 399 nm->make_zombie();
399 400 _rescan = true;
400 401 SWEEP(nm);
401 402 }
402 403 } else {
403 404 assert(nm->is_alive(), "should be alive");
404 405
405 406 if (UseCodeCacheFlushing) {
406 407 if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
407 408 (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
408 409 CodeCache::needs_flushing()) {
409 410 // This method has not been called since the forced cleanup happened
410 411 nm->make_not_entrant();
411 412 }
412 413 }
413 414
414 415 // Clean-up all inline caches that points to zombie/non-reentrant methods
415 416 MutexLocker cl(CompiledIC_lock);
416 417 nm->cleanup_inline_caches();
417 418 SWEEP(nm);
418 419 }
419 420 }
420 421
421 422 // Code cache unloading: when compilers notice the code cache is getting full,
422 423 // they will call a vm op that comes here. This code attempts to speculatively
423 424 // unload the oldest half of the nmethods (based on the compile job id) by
424 425 // saving the old code in a list in the CodeCache. Then
425 426 // execution resumes. If a method so marked is not called by the second sweeper
426 427 // stack traversal after the current one, the nmethod will be marked non-entrant and
427 428 // got rid of by normal sweeping. If the method is called, the methodOop's
428 429 // _code field is restored and the methodOop/nmethod
429 430 // go back to their normal state.
430 431 void NMethodSweeper::handle_full_code_cache(bool is_full) {
431 432 // Only the first one to notice can advise us to start early cleaning
432 433 if (!is_full){
433 434 jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
434 435 if (old != 0) {
435 436 return;
436 437 }
437 438 }
438 439
439 440 if (is_full) {
440 441 // Since code cache is full, immediately stop new compiles
441 442 bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
442 443 if (!did_set) {
443 444 // only the first to notice can start the cleaning,
444 445 // others will go back and block
445 446 return;
446 447 }
447 448 set_was_full(true);
448 449
449 450 // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
450 451 jlong now = os::javaTimeMillis();
451 452 jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
452 453 jlong curr_interval = now - _last_was_full;
453 454 if (curr_interval < max_interval) {
454 455 _rescan = true;
455 456 log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
456 457 curr_interval/1000);
457 458 return;
458 459 }
459 460 }
460 461
461 462 VM_HandleFullCodeCache op(is_full);
462 463 VMThread::execute(&op);
463 464
464 465 // rescan again as soon as possible
465 466 _rescan = true;
466 467 }
467 468
468 469 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
469 470 // If there was a race in detecting full code cache, only run
470 471 // one vm op for it or keep the compiler shut off
471 472
472 473 debug_only(jlong start = os::javaTimeMillis();)
473 474
474 475 if ((!was_full()) && (is_full)) {
475 476 if (!CodeCache::needs_flushing()) {
476 477 log_sweep("restart_compiler");
477 478 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
478 479 return;
479 480 }
480 481 }
481 482
482 483 // Traverse the code cache trying to dump the oldest nmethods
483 484 uint curr_max_comp_id = CompileBroker::get_compilation_id();
484 485 uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
485 486 log_sweep("start_cleaning");
486 487
487 488 nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
488 489 jint disconnected = 0;
489 490 jint made_not_entrant = 0;
490 491 while ((nm != NULL)){
491 492 uint curr_comp_id = nm->compile_id();
492 493
493 494 // OSR methods cannot be flushed like this. Also, don't flush native methods
494 495 // since they are part of the JDK in most cases
495 496 if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
496 497 (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {
497 498
498 499 if ((nm->method()->code() == nm)) {
499 500 // This method has not been previously considered for
500 501 // unloading or it was restored already
501 502 CodeCache::speculatively_disconnect(nm);
502 503 disconnected++;
503 504 } else if (nm->is_speculatively_disconnected()) {
504 505 // This method was previously considered for preemptive unloading and was not called since then
505 506 CompilationPolicy::policy()->delay_compilation(nm->method());
506 507 nm->make_not_entrant();
507 508 made_not_entrant++;
508 509 }
509 510
510 511 if (curr_comp_id > _highest_marked) {
511 512 _highest_marked = curr_comp_id;
512 513 }
513 514 }
514 515 nm = CodeCache::alive_nmethod(CodeCache::next(nm));
515 516 }
516 517
517 518 log_sweep("stop_cleaning",
518 519 "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
519 520 disconnected, made_not_entrant);
520 521
521 522 // Shut off compiler. Sweeper will start over with a new stack scan and
522 523 // traversal cycle and turn it back on if it clears enough space.
523 524 if (was_full()) {
524 525 _last_was_full = os::javaTimeMillis();
525 526 CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
526 527 }
527 528
528 529 // After two more traversals the sweeper will get rid of unrestored nmethods
529 530 _was_full_traversal = _traversals;
530 531 #ifdef ASSERT
531 532 jlong end = os::javaTimeMillis();
532 533 if(PrintMethodFlushing && Verbose) {
533 534 tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);
534 535 }
535 536 #endif
536 537 }
537 538
538 539
539 540 // Print out some state information about the current sweep and the
540 541 // state of the code cache if it's requested.
541 542 void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
542 543 if (PrintMethodFlushing) {
543 544 stringStream s;
544 545 // Dump code cache state into a buffer before locking the tty,
545 546 // because log_state() will use locks causing lock conflicts.
546 547 CodeCache::log_state(&s);
547 548
548 549 ttyLocker ttyl;
549 550 tty->print("### sweeper: %s ", msg);
550 551 if (format != NULL) {
551 552 va_list ap;
552 553 va_start(ap, format);
553 554 tty->vprint(format, ap);
554 555 va_end(ap);
555 556 }
556 557 tty->print_cr(s.as_string());
557 558 }
558 559
559 560 if (LogCompilation && (xtty != NULL)) {
560 561 stringStream s;
561 562 // Dump code cache state into a buffer before locking the tty,
562 563 // because log_state() will use locks causing lock conflicts.
563 564 CodeCache::log_state(&s);
564 565
565 566 ttyLocker ttyl;
566 567 xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
567 568 if (format != NULL) {
568 569 va_list ap;
569 570 va_start(ap, format);
570 571 xtty->vprint(format, ap);
571 572 va_end(ap);
572 573 }
573 574 xtty->print(s.as_string());
574 575 xtty->stamp();
575 576 xtty->end_elem();
576 577 }
577 578 }
↓ open down ↓ |
261 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX