35 #include "runtime/mutexLocker.hpp"
36 #include "runtime/orderAccess.inline.hpp"
37 #include "runtime/os.hpp"
38 #include "runtime/sweeper.hpp"
39 #include "runtime/thread.inline.hpp"
40 #include "runtime/vm_operations.hpp"
41 #include "trace/tracing.hpp"
42 #include "utilities/events.hpp"
43 #include "utilities/ticks.inline.hpp"
44 #include "utilities/xmlstream.hpp"
45
46 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
47
48 #ifdef ASSERT
49
50 #define SWEEP(nm) record_sweep(nm, __LINE__)
51 // Sweeper logging code
52 class SweeperRecord {
53 public:
54 int traversal;
55 int invocation;
56 int compile_id;
57 long traversal_mark;
58 int state;
59 const char* kind;
60 address vep;
61 address uep;
62 int line;
63
64 void print() {
65 tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
66 PTR_FORMAT " state = %d traversal_mark %ld line = %d",
67 traversal,
68 invocation,
69 compile_id,
70 kind == NULL ? "" : kind,
71 uep,
72 vep,
73 state,
74 traversal_mark,
75 line);
76 }
77 };
78
79 static int _sweep_index = 0;
80 static SweeperRecord* _records = NULL;
81
82 void NMethodSweeper::report_events(int id, address entry) {
83 if (_records != NULL) {
84 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
85 if (_records[i].uep == entry ||
86 _records[i].vep == entry ||
87 _records[i].compile_id == id) {
88 _records[i].print();
100
101 void NMethodSweeper::report_events() {
102 if (_records != NULL) {
103 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
104 // skip empty records
105 if (_records[i].vep == NULL) continue;
106 _records[i].print();
107 }
108 for (int i = 0; i < _sweep_index; i++) {
109 // skip empty records
110 if (_records[i].vep == NULL) continue;
111 _records[i].print();
112 }
113 }
114 }
115
116 void NMethodSweeper::record_sweep(nmethod* nm, int line) {
117 if (_records != NULL) {
118 _records[_sweep_index].traversal = _traversals;
119 _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
120 _records[_sweep_index].invocation = _sweep_fractions_left;
121 _records[_sweep_index].compile_id = nm->compile_id();
122 _records[_sweep_index].kind = nm->compile_kind();
123 _records[_sweep_index].state = nm->_state;
124 _records[_sweep_index].vep = nm->verified_entry_point();
125 _records[_sweep_index].uep = nm->entry_point();
126 _records[_sweep_index].line = line;
127 _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
128 }
129 }
130 #else
131 #define SWEEP(nm)
132 #endif
133
134 NMethodIterator NMethodSweeper::_current; // Current nmethod
135 long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID.
136 long NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache
137 long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper
138 long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened
139 int NMethodSweeper::_seen = 0; // Nof. nmethods we have currently processed in current pass of CodeCache
140 int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
141 int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
142 int NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep
143
144 volatile bool NMethodSweeper::_should_sweep = true; // Indicates if we should invoke the sweeper
145 volatile int NMethodSweeper::_sweep_fractions_left = 0; // Nof. invocations left until we are completed with this pass
146 volatile int NMethodSweeper::_sweep_started = 0; // Flag to control conc sweeper
147 volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from:
148 // 1) alive -> not_entrant
149 // 2) not_entrant -> zombie
150 // 3) zombie -> marked_for_reclamation
151 int NMethodSweeper::_hotness_counter_reset_val = 0;
152
153 long NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed
154 long NMethodSweeper::_total_nof_c2_methods_reclaimed = 0; // Accumulated nof C2 methods flushed
155 size_t NMethodSweeper::_total_flushed_size = 0; // Total number of bytes flushed from the code cache
156 Tickspan NMethodSweeper::_total_time_sweeping; // Accumulated time sweeping
157 Tickspan NMethodSweeper::_total_time_this_sweep; // Total time this sweep
158 Tickspan NMethodSweeper::_peak_sweep_time; // Peak time for a full sweep
159 Tickspan NMethodSweeper::_peak_sweep_fraction_time; // Peak time sweeping one fraction
160
161
162 class MarkActivationClosure: public CodeBlobClosure {
163 public:
164 virtual void do_code_blob(CodeBlob* cb) {
165 assert(cb->is_nmethod(), "CodeBlob should be nmethod");
166 nmethod* nm = (nmethod*)cb;
173 };
174 static MarkActivationClosure mark_activation_closure;
175
176 class SetHotnessClosure: public CodeBlobClosure {
177 public:
178 virtual void do_code_blob(CodeBlob* cb) {
179 assert(cb->is_nmethod(), "CodeBlob should be nmethod");
180 nmethod* nm = (nmethod*)cb;
181 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
182 }
183 };
184 static SetHotnessClosure set_hotness_closure;
185
186
187 int NMethodSweeper::hotness_counter_reset_val() {
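// The reset value scales with the reserved code cache size: for example, a 256M
// cache yields (256M / M) * 2 = 512, while a cache smaller than 1M uses 1.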
188 if (_hotness_counter_reset_val == 0) {
189 _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
190 }
191 return _hotness_counter_reset_val;
192 }
193 bool NMethodSweeper::sweep_in_progress() {
194 return !_current.end();
195 }
196
197 // Scans the stacks of all Java threads and marks activations of not-entrant methods.
198 // No need to synchronize access, since 'mark_active_nmethods' is always executed at a
199 // safepoint.
200 void NMethodSweeper::mark_active_nmethods() {
201 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
202 // If we do not want to reclaim not-entrant or zombie methods there is no need
203 // to scan stacks
204 if (!MethodFlushing) {
205 return;
206 }
207
208 // Increase time so that we can estimate when to invoke the sweeper again.
209 _time_counter++;
210
211 // Check for restart
212 assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
213 if (!sweep_in_progress()) {
214 _seen = 0;
215 _sweep_fractions_left = NmethodSweepFraction;
216 _current = NMethodIterator();
217 // Initialize to first nmethod
218 _current.next();
219 _traversals += 1;
220 _total_time_this_sweep = Tickspan();
221
222 if (PrintMethodFlushing) {
223 tty->print_cr("### Sweep: stack traversal %ld", _traversals);
224 }
225 Threads::nmethods_do(&mark_activation_closure);
226
227 } else {
228 // Only set hotness counter
229 Threads::nmethods_do(&set_hotness_closure);
230 }
231
232 OrderAccess::storestore();
233 }
234 /**
235 * This function invokes the sweeper if at least one of the three conditions is met:
236 * (1) The code cache is getting full
237 * (2) There are sufficient state changes in/since the last sweep.
238 * (3) We have not been sweeping for 'some time'
239 */
240 void NMethodSweeper::possibly_sweep() {
241 assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
242 // Only compiler threads are allowed to sweep
243 if (!MethodFlushing || !sweep_in_progress() || !Thread::current()->is_Compiler_thread()) {
244 return;
245 }
246
247 // If there was no state change while nmethod sweeping, 'should_sweep' will be false.
248 // This is one of the two places where should_sweep can be set to true. The general
249 // idea is as follows: If there is enough free space in the code cache, there is no
250 // need to invoke the sweeper. The following formula (which determines whether to invoke
251 // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes
252 // we need less frequent sweeps than for smaller ReservedCodeCacheSizes. Furthermore,
253 // the formula considers how much space in the code cache is currently used. Here are
254 // some examples that will (hopefully) help in understanding.
255 //
256 // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since
257 // the result of the division is 0. This
258 // keeps the used code cache size small
259 // (important for embedded Java)
260 // Large ReservedCodeCacheSize : (e.g., 256M + code cache is 10% full). The formula
261 // computes: (256 / 16) - 1 = 15
262 // As a result, we invoke the sweeper after
263 // 15 invocations of 'mark_active_nmethods'.
264 // Large ReservedCodeCacheSize: (e.g., 256M + code cache is 90% full). The formula
265 // computes: (256 / 16) - 10 = 6.
266 if (!_should_sweep) {
267 const int time_since_last_sweep = _time_counter - _last_sweep;
268 // ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time,
269 // since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens, using
270 // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
271 // value) that disables the intended periodic sweeps.
272 const int max_wait_time = ReservedCodeCacheSize / (16 * M);
273 double wait_until_next_sweep = max_wait_time - time_since_last_sweep -
274 MAX2(CodeCache::reverse_free_ratio(CodeBlobType::MethodProfiled),
275 CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled));
276 assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");
277
278 if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
279 _should_sweep = true;
280 }
281 }
282
283 if (_should_sweep && _sweep_fractions_left > 0) {
284 // Only one thread at a time will sweep
285 jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
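// Atomic::cmpxchg returns the previous value of _sweep_started; a non-zero result
// means another compiler thread has already claimed this sweep, so we back out.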
286 if (old != 0) {
287 return;
288 }
289 #ifdef ASSERT
290 if (LogSweeper && _records == NULL) {
291 // Create the ring buffer for the logging code
292 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
293 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
294 }
295 #endif
296
297 if (_sweep_fractions_left > 0) {
298 sweep_code_cache();
299 _sweep_fractions_left--;
300 }
301
302 // We are done with sweeping the code cache once.
303 if (_sweep_fractions_left == 0) {
304 _total_nof_code_cache_sweeps++;
305 _last_sweep = _time_counter;
306 // Reset flag; temporarily disables sweeper
307 _should_sweep = false;
308 // If there was enough state change, 'possibly_enable_sweeper()'
309 // sets '_should_sweep' to true
310 possibly_enable_sweeper();
311 // Reset _bytes_changed only if there was enough state change. _bytes_changed
312 // can further increase by calls to 'report_state_change'.
313 if (_should_sweep) {
314 _bytes_changed = 0;
315 }
316 }
317 // Release work, because another compiler thread could continue.
318 OrderAccess::release_store((int*)&_sweep_started, 0);
319 }
320 }
321
322 void NMethodSweeper::sweep_code_cache() {
323 Ticks sweep_start_counter = Ticks::now();
324
325 _flushed_count = 0;
326 _zombified_count = 0;
327 _marked_for_reclamation_count = 0;
328
329 if (PrintMethodFlushing && Verbose) {
330 tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
331 }
332
333 if (!CompileBroker::should_compile_new_jobs()) {
334 // If we have turned off compilations we might as well do full sweeps
335 // in order to reach the clean state faster. Otherwise the sleeping compiler
336 // threads will slow down sweeping.
337 _sweep_fractions_left = 1;
338 }
339
340 // We want to visit all nmethods after NmethodSweepFraction
341 // invocations so divide the remaining number of nmethods by the
342 // remaining number of invocations. This is only an estimate since
343 // the number of nmethods changes during the sweep so the final
344 // stage must iterate until there are no more nmethods.
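// For example, with 4000 nmethods remaining and 16 fractions left, roughly 250
// nmethods are visited in this invocation.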
345 int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
346 int swept_count = 0;
347
348
349 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
350 assert(!CodeCache_lock->owned_by_self(), "just checking");
351
352 int freed_memory = 0;
353 {
354 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
355
356 // The last invocation iterates until there are no more nmethods
357 while ((swept_count < todo || _sweep_fractions_left == 1) && !_current.end()) {
358 swept_count++;
359 if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
360 if (PrintMethodFlushing && Verbose) {
361 tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
362 }
363 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
364
365 assert(Thread::current()->is_Java_thread(), "should be java thread");
366 JavaThread* thread = (JavaThread*)Thread::current();
367 ThreadBlockInVM tbivm(thread);
368 thread->java_suspend_self();
369 }
370 // Since we will give up the CodeCache_lock, always skip ahead
371 // to the next nmethod. Other blobs can be deleted by other
372 // threads but nmethods are only reclaimed by the sweeper.
373 nmethod* nm = _current.method();
374 _current.next();
375
376 // Now ready to process nmethod and give up CodeCache_lock
377 {
378 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
379 freed_memory += process_nmethod(nm);
380 }
381 _seen++;
382 }
383 }
384
385 assert(_sweep_fractions_left > 1 || _current.end(), "must have scanned the whole cache");
386
387 const Ticks sweep_end_counter = Ticks::now();
388 const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
389 _total_time_sweeping += sweep_time;
390 _total_time_this_sweep += sweep_time;
391 _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
392 _total_flushed_size += freed_memory;
393 _total_nof_methods_reclaimed += _flushed_count;
394
395 EventSweepCodeCache event(UNTIMED);
396 if (event.should_commit()) {
397 event.set_starttime(sweep_start_counter);
398 event.set_endtime(sweep_end_counter);
399 event.set_sweepIndex(_traversals);
400 event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
401 event.set_sweptCount(swept_count);
402 event.set_flushedCount(_flushed_count);
403 event.set_markedCount(_marked_for_reclamation_count);
404 event.set_zombifiedCount(_zombified_count);
405 event.commit();
406 }
407
408 #ifdef ASSERT
409 if (PrintMethodFlushing) {
410 tty->print_cr("### sweeper: sweep time(%d): "
411 INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time.value());
412 }
413 #endif
414
415 if (_sweep_fractions_left == 1) {
416 _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
417 log_sweep("finished");
418 }
419
420 // Sweeper is the only case where memory is released, check here if it
421 // is time to restart the compiler. Only checking if there is a certain
422 // amount of free memory in the code cache might lead to re-enabling
423 // compilation although no memory has been released. For example, there are
424 // cases when compilation was disabled although there is 4MB (or more) free
425 // memory in the code cache. The reason is code cache fragmentation. Therefore,
426 // it only makes sense to re-enable compilation if we have actually freed memory.
427 // Note that typically several kB are released for sweeping 16MB of the code
428 // cache. As a result, we require 'freed_memory' > 0 to restart the compiler.
429 if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) {
430 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
431 log_sweep("restart_compiler");
432 }
433 }
434
435 /**
436 * This function updates the sweeper statistics that keep track of nmethods
437 * state changes. If there is 'enough' state change, the sweeper is invoked
438 * as soon as possible. There can be data races on _bytes_changed. The data
442 */
443 void NMethodSweeper::report_state_change(nmethod* nm) {
444 _bytes_changed += nm->total_size();
445 possibly_enable_sweeper();
446 }
447
448 /**
449 * Function determines if there was 'enough' state change in the code cache to invoke
450 * the sweeper again. Currently, we determine 'enough' as more than 1% state change in
451 * the code cache since the last sweep.
452 */
453 void NMethodSweeper::possibly_enable_sweeper() {
454 double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100;
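// For example, with a 256M ReservedCodeCacheSize, more than about 2.56M of changed
// nmethod state since the last sweep re-enables the sweeper.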
455 if (percent_changed > 1.0) {
456 _should_sweep = true;
457 }
458 }
459
460 class NMethodMarker: public StackObj {
461 private:
462 CompilerThread* _thread;
463 public:
464 NMethodMarker(nmethod* nm) {
465 _thread = CompilerThread::current();
466 if (!nm->is_zombie() && !nm->is_unloaded()) {
467 // Only expose live nmethods for scanning
468 _thread->set_scanned_nmethod(nm);
469 }
470 }
471 ~NMethodMarker() {
472 _thread->set_scanned_nmethod(NULL);
473 }
474 };
475
476 void NMethodSweeper::release_nmethod(nmethod *nm) {
477 // Clean up any CompiledICHolders
478 {
479 ResourceMark rm;
480 MutexLocker ml_patch(CompiledIC_lock);
481 RelocIterator iter(nm);
482 while (iter.next()) {
483 if (iter.type() == relocInfo::virtual_call_type) {
484 CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
485 }
486 }
487 }
488
489 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
490 nm->flush();
491 }
492
493 int NMethodSweeper::process_nmethod(nmethod *nm) {
494 assert(!CodeCache_lock->owned_by_self(), "just checking");
495
496 int freed_memory = 0;
497 // Make sure this nmethod doesn't get unloaded during the scan,
498 // since safepoints may happen while acquiring the locks below.
499 NMethodMarker nmm(nm);
500 SWEEP(nm);
501
502 // Skip methods that are currently referenced by the VM
503 if (nm->is_locked_by_vm()) {
504 // But still remember to clean-up inline caches for alive nmethods
505 if (nm->is_alive()) {
506 // Clean inline caches that point to zombie/non-entrant methods
507 MutexLocker cl(CompiledIC_lock);
508 nm->cleanup_inline_caches();
509 SWEEP(nm);
510 }
511 return freed_memory;
512 }
513
|
35 #include "runtime/mutexLocker.hpp"
36 #include "runtime/orderAccess.inline.hpp"
37 #include "runtime/os.hpp"
38 #include "runtime/sweeper.hpp"
39 #include "runtime/thread.inline.hpp"
40 #include "runtime/vm_operations.hpp"
41 #include "trace/tracing.hpp"
42 #include "utilities/events.hpp"
43 #include "utilities/ticks.inline.hpp"
44 #include "utilities/xmlstream.hpp"
45
46 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
47
48 #ifdef ASSERT
49
50 #define SWEEP(nm) record_sweep(nm, __LINE__)
51 // Sweeper logging code
52 class SweeperRecord {
53 public:
54 int traversal;
55 int compile_id;
56 long traversal_mark;
57 int state;
58 const char* kind;
59 address vep;
60 address uep;
61 int line;
62
63 void print() {
64 tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
65 PTR_FORMAT " state = %d traversal_mark %ld line = %d",
66 traversal,
67 compile_id,
68 kind == NULL ? "" : kind,
69 uep,
70 vep,
71 state,
72 traversal_mark,
73 line);
74 }
75 };
76
77 static int _sweep_index = 0;
78 static SweeperRecord* _records = NULL;
79
80 void NMethodSweeper::report_events(int id, address entry) {
81 if (_records != NULL) {
82 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
83 if (_records[i].uep == entry ||
84 _records[i].vep == entry ||
85 _records[i].compile_id == id) {
86 _records[i].print();
98
99 void NMethodSweeper::report_events() {
100 if (_records != NULL) {
101 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
102 // skip empty records
103 if (_records[i].vep == NULL) continue;
104 _records[i].print();
105 }
106 for (int i = 0; i < _sweep_index; i++) {
107 // skip empty records
108 if (_records[i].vep == NULL) continue;
109 _records[i].print();
110 }
111 }
112 }
113
114 void NMethodSweeper::record_sweep(nmethod* nm, int line) {
115 if (_records != NULL) {
116 _records[_sweep_index].traversal = _traversals;
117 _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
118 _records[_sweep_index].compile_id = nm->compile_id();
119 _records[_sweep_index].kind = nm->compile_kind();
120 _records[_sweep_index].state = nm->_state;
121 _records[_sweep_index].vep = nm->verified_entry_point();
122 _records[_sweep_index].uep = nm->entry_point();
123 _records[_sweep_index].line = line;
124 _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
125 }
126 }
127
128 void NMethodSweeper::init_log_sweeper() {
129 if (LogSweeper && _records == NULL) {
130 // Create the ring buffer for the logging code
131 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
132 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
133 }
134 }
135 #else
136 #define SWEEP(nm)
137 #endif
138
139 NMethodIterator NMethodSweeper::_current; // Current nmethod
140 long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID.
141 long NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache
142 long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper
143 long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened
144 int NMethodSweeper::_seen = 0; // Nof. nmethods we have currently processed in current pass of CodeCache
145 int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
146 int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
147 int NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep
148
149 volatile bool NMethodSweeper::_should_sweep = true; // Indicates if we should invoke the sweeper
150 volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from:
151 // 1) alive -> not_entrant
152 // 2) not_entrant -> zombie
153 // 3) zombie -> marked_for_reclamation
154 int NMethodSweeper::_hotness_counter_reset_val = 0;
155
156 long NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed
157 long NMethodSweeper::_total_nof_c2_methods_reclaimed = 0; // Accumulated nof C2 methods flushed
158 size_t NMethodSweeper::_total_flushed_size = 0; // Total number of bytes flushed from the code cache
159 Tickspan NMethodSweeper::_total_time_sweeping; // Accumulated time sweeping
160 Tickspan NMethodSweeper::_total_time_this_sweep; // Total time this sweep
161 Tickspan NMethodSweeper::_peak_sweep_time; // Peak time for a full sweep
162 Tickspan NMethodSweeper::_peak_sweep_fraction_time; // Peak time sweeping one fraction
163
164
165 class MarkActivationClosure: public CodeBlobClosure {
166 public:
167 virtual void do_code_blob(CodeBlob* cb) {
168 assert(cb->is_nmethod(), "CodeBlob should be nmethod");
169 nmethod* nm = (nmethod*)cb;
176 };
177 static MarkActivationClosure mark_activation_closure;
178
179 class SetHotnessClosure: public CodeBlobClosure {
180 public:
181 virtual void do_code_blob(CodeBlob* cb) {
182 assert(cb->is_nmethod(), "CodeBlob should be nmethod");
183 nmethod* nm = (nmethod*)cb;
184 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
185 }
186 };
187 static SetHotnessClosure set_hotness_closure;
188
189
190 int NMethodSweeper::hotness_counter_reset_val() {
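// The reset value scales with the reserved code cache size: for example, a 256M
// cache yields (256M / M) * 2 = 512, while a cache smaller than 1M uses 1.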
191 if (_hotness_counter_reset_val == 0) {
192 _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
193 }
194 return _hotness_counter_reset_val;
195 }
196 bool NMethodSweeper::wait_for_stack_scanning() {
197 return _current.end();
198 }
199
200 /**
201 * Scans the stacks of all Java threads and marks activations of not-entrant methods.
202 * No need to synchronize access, since 'mark_active_nmethods' is always executed at a
203 * safepoint.
204 */
205 void NMethodSweeper::mark_active_nmethods() {
206 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
207 // If we do not want to reclaim not-entrant or zombie methods there is no need
208 // to scan stacks
209 if (!MethodFlushing) {
210 return;
211 }
212
213 // Increase time so that we can estimate when to invoke the sweeper again.
214 _time_counter++;
215
216 // Check for restart
217 assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
218 if (wait_for_stack_scanning()) {
219 _seen = 0;
220 _current = NMethodIterator();
221 // Initialize to first nmethod
222 _current.next();
223 _traversals += 1;
224 _total_time_this_sweep = Tickspan();
225
226 if (PrintMethodFlushing) {
227 tty->print_cr("### Sweep: stack traversal %ld", _traversals);
228 }
229 Threads::nmethods_do(&mark_activation_closure);
230
231 } else {
232 // Only set hotness counter
233 Threads::nmethods_do(&set_hotness_closure);
234 }
235
236 OrderAccess::storestore();
237 }
238
239 /**
240 * This function triggers a VM operation that does stack scanning of active
241 * methods. Stack scanning is mandatory for the sweeper to make progress.
242 */
243 void NMethodSweeper::do_stack_scanning() {
244 assert(!CodeCache_lock->owned_by_self(), "just checking");
245 if (wait_for_stack_scanning()) {
246 VM_MarkActiveNMethods op;
247 VMThread::execute(&op);
248 _should_sweep = true;
249 }
250 }
251
252 void NMethodSweeper::sweeper_loop() {
253 bool timeout;
254 while (true) {
255 {
256 ThreadBlockInVM tbivm(JavaThread::current());
257 MutexLockerEx waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
258 const long wait_time = 60*60*24 * 1000;
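// 60*60*24*1000 ms = 24 hours; NMethodSweeper::notify() can wake the sweeper earlier
// by notifying on the CodeCache_lock.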
259 timeout = CodeCache_lock->wait(Mutex::_no_safepoint_check_flag, wait_time);
260 }
261 if (!timeout) {
262 possibly_sweep();
263 }
264 }
265 }
266
267 /**
268 * Wakes up the sweeper thread to possibly sweep.
269 */
270 void NMethodSweeper::notify(int code_blob_type) {
271 // Don't notify the sweeper if less than 10% of the code cache is used.
272 // This makes sure that we do not invoke the sweeper too often during startup.
273 if (CodeCache::reverse_free_ratio(code_blob_type) > 1.1) {
274 assert_locked_or_safepoint(CodeCache_lock);
275 CodeCache_lock->notify();
276 }
277 }
278
279 /**
280 * Handle a safepoint request
281 */
282 void NMethodSweeper::handle_safepoint_request() {
283 if (SafepointSynchronize::is_synchronizing()) {
284 if (PrintMethodFlushing && Verbose) {
285 tty->print_cr("### Sweep at %d out of %d, yielding to safepoint", _seen, CodeCache::nof_nmethods());
286 }
287 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
288
289 JavaThread* thread = JavaThread::current();
290 ThreadBlockInVM tbivm(thread);
291 thread->java_suspend_self();
292 }
293 }
294
295 /**
296 * This function invokes the sweeper if at least one of the three conditions is met:
297 * (1) The code cache is getting full
298 * (2) There are sufficient state changes in/since the last sweep.
299 * (3) We have not been sweeping for 'some time'
300 */
301 void NMethodSweeper::possibly_sweep() {
302 assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
303 // If there was no state change while nmethod sweeping, 'should_sweep' will be false.
304 // This is one of the two places where should_sweep can be set to true. The general
305 // idea is as follows: If there is enough free space in the code cache, there is no
306 // need to invoke the sweeper. The following formula (which determines whether to invoke
307 // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes
308 // we need less frequent sweeps than for smaller ReservedCodeCacheSizes. Furthermore,
309 // the formula considers how much space in the code cache is currently used. Here are
310 // some examples that will (hopefully) help in understanding.
311 //
312 // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since
313 // the result of the division is 0. This
314 // keeps the used code cache size small
315 // (important for embedded Java)
316 // Large ReservedCodeCacheSize : (e.g., 256M + code cache is 10% full). The formula
317 // computes: (256 / 16) - 1 = 15
318 // As a result, we invoke the sweeper after
319 // 15 invocations of 'mark_active_nmethods'.
320 // Large ReservedCodeCacheSize: (e.g., 256M + code cache is 90% full). The formula
321 // computes: (256 / 16) - 10 = 6.
322 if (!_should_sweep) {
323 const int time_since_last_sweep = _time_counter - _last_sweep;
324 // ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time,
325 // since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens, using
326 // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
327 // value) that disables the intended periodic sweeps.
328 const int max_wait_time = ReservedCodeCacheSize / (16 * M);
329 double wait_until_next_sweep = max_wait_time - time_since_last_sweep -
330 MAX2(CodeCache::reverse_free_ratio(CodeBlobType::MethodProfiled),
331 CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled));
332 assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");
333
334 if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
335 _should_sweep = true;
336 }
337 }
338
339 // Force stack scanning if there is less than 10% free space in the code cache.
340 // We force stack scanning only if the non-profiled code heap gets full, since critical
341 // allocations go to the non-profiled heap and we must make sure that there is
342 // enough space.
343 if (CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled) > 10.0) {
344 do_stack_scanning();
345 }
346
347 if (_should_sweep) {
348 init_log_sweeper();
349 sweep_code_cache();
350 }
351
352 // We are done with sweeping the code cache once.
353 _total_nof_code_cache_sweeps++;
354 _last_sweep = _time_counter;
355 // Reset flag; temporarily disables sweeper
356 _should_sweep = false;
357 // If there was enough state change, 'possibly_enable_sweeper()'
358 // sets '_should_sweep' to true
359 possibly_enable_sweeper();
360 // Reset _bytes_changed only if there was enough state change. _bytes_changed
361 // can further increase by calls to 'report_state_change'.
362 if (_should_sweep) {
363 _bytes_changed = 0;
364 }
365 }
366
367 void NMethodSweeper::sweep_code_cache() {
368 ResourceMark rm;
369 Ticks sweep_start_counter = Ticks::now();
370
371 _flushed_count = 0;
372 _zombified_count = 0;
373 _marked_for_reclamation_count = 0;
374
375 if (PrintMethodFlushing && Verbose) {
376 tty->print_cr("### Sweep at %d out of %d", _seen, CodeCache::nof_nmethods());
377 }
378
379 int swept_count = 0;
380 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
381 assert(!CodeCache_lock->owned_by_self(), "just checking");
382
383 int freed_memory = 0;
384 {
385 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
386
387 // The last invocation iterates until there are no more nmethods
388 while (!_current.end()) {
389 swept_count++;
390 handle_safepoint_request();
391 // Since we will give up the CodeCache_lock, always skip ahead
392 // to the next nmethod. Other blobs can be deleted by other
393 // threads but nmethods are only reclaimed by the sweeper.
394 nmethod* nm = _current.method();
395 _current.next();
396
397 // Now ready to process nmethod and give up CodeCache_lock
398 {
399 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
400 freed_memory += process_nmethod(nm);
401 }
402 _seen++;
403 }
404 }
405
406 assert(_current.end(), "must have scanned the whole cache");
407
408 const Ticks sweep_end_counter = Ticks::now();
409 const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
410 _total_time_sweeping += sweep_time;
411 _total_time_this_sweep += sweep_time;
412 _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
413 _total_flushed_size += freed_memory;
414 _total_nof_methods_reclaimed += _flushed_count;
415
416 EventSweepCodeCache event(UNTIMED);
417 if (event.should_commit()) {
418 event.set_starttime(sweep_start_counter);
419 event.set_endtime(sweep_end_counter);
420 event.set_sweepIndex(_traversals);
421 event.set_sweptCount(swept_count);
422 event.set_flushedCount(_flushed_count);
423 event.set_markedCount(_marked_for_reclamation_count);
424 event.set_zombifiedCount(_zombified_count);
425 event.commit();
426 }
427
428 #ifdef ASSERT
429 if (PrintMethodFlushing) {
430 tty->print_cr("### sweeper: sweep time: " INT64_FORMAT, (jlong)sweep_time.value());
431 }
432 #endif
433
434 _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
435 log_sweep("finished");
436
437 // Sweeper is the only case where memory is released, check here if it
438 // is time to restart the compiler. Only checking if there is a certain
439 // amount of free memory in the code cache might lead to re-enabling
440 // compilation although no memory has been released. For example, there are
441 // cases when compilation was disabled although there is 4MB (or more) free
442 // memory in the code cache. The reason is code cache fragmentation. Therefore,
443 // it only makes sense to re-enable compilation if we have actually freed memory.
444 // Note that typically several kB are released for sweeping 16MB of the code
445 // cache. As a result, we require 'freed_memory' > 0 to restart the compiler.
446 if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) {
447 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
448 log_sweep("restart_compiler");
449 }
450 }
451
452 /**
453 * This function updates the sweeper statistics that keep track of nmethods
454 * state changes. If there is 'enough' state change, the sweeper is invoked
455 * as soon as possible. There can be data races on _bytes_changed. The data
459 */
460 void NMethodSweeper::report_state_change(nmethod* nm) {
461 _bytes_changed += nm->total_size();
462 possibly_enable_sweeper();
463 }
464
465 /**
466 * Function determines if there was 'enough' state change in the code cache to invoke
467 * the sweeper again. Currently, we determine 'enough' as more than 1% state change in
468 * the code cache since the last sweep.
469 */
470 void NMethodSweeper::possibly_enable_sweeper() {
471 double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100;
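// For example, with a 256M ReservedCodeCacheSize, more than about 2.56M of changed
// nmethod state since the last sweep re-enables the sweeper.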
472 if (percent_changed > 1.0) {
473 _should_sweep = true;
474 }
475 }
476
477 class NMethodMarker: public StackObj {
478 private:
479 CodeCacheSweeperThread* _thread;
480 public:
481 NMethodMarker(nmethod* nm) {
482 JavaThread* current = JavaThread::current();
483 assert(current->is_Code_cache_sweeper_thread(), "Must be");
484 _thread = (CodeCacheSweeperThread*)current;
485 if (!nm->is_zombie() && !nm->is_unloaded()) {
486 // Only expose live nmethods for scanning
487 _thread->set_scanned_nmethod(nm);
488 }
489 }
490 ~NMethodMarker() {
491 _thread->set_scanned_nmethod(NULL);
492 }
493 };
494
495 void NMethodSweeper::release_nmethod(nmethod* nm) {
496 // Clean up any CompiledICHolders
497 {
498 ResourceMark rm;
499 MutexLocker ml_patch(CompiledIC_lock);
500 RelocIterator iter(nm);
501 while (iter.next()) {
502 if (iter.type() == relocInfo::virtual_call_type) {
503 CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
504 }
505 }
506 }
507
508 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
509 nm->flush();
510 }
511
512 int NMethodSweeper::process_nmethod(nmethod* nm) {
513 assert(!CodeCache_lock->owned_by_self(), "just checking");
514
515 int freed_memory = 0;
516 // Make sure this nmethod doesn't get unloaded during the scan,
517 // since safepoints may happen while acquiring the locks below.
518 NMethodMarker nmm(nm);
519 SWEEP(nm);
520
521 // Skip methods that are currently referenced by the VM
522 if (nm->is_locked_by_vm()) {
523 // But still remember to clean-up inline caches for alive nmethods
524 if (nm->is_alive()) {
525 // Clean inline caches that point to zombie/non-entrant methods
526 MutexLocker cl(CompiledIC_lock);
527 nm->cleanup_inline_caches();
528 SWEEP(nm);
529 }
530 return freed_memory;
531 }
532
|