114 }
115
116 void NMethodSweeper::record_sweep(nmethod* nm, int line) {
117 if (_records != NULL) {
118 _records[_sweep_index].traversal = _traversals;
119 _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
120 _records[_sweep_index].invocation = _sweep_fractions_left;
121 _records[_sweep_index].compile_id = nm->compile_id();
122 _records[_sweep_index].kind = nm->compile_kind();
123 _records[_sweep_index].state = nm->_state;
124 _records[_sweep_index].vep = nm->verified_entry_point();
125 _records[_sweep_index].uep = nm->entry_point();
126 _records[_sweep_index].line = line;
127 _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
128 }
129 }
130 #else
131 #define SWEEP(nm)
132 #endif
133
134 nmethod* NMethodSweeper::_current = NULL; // Current nmethod
135 long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID.
136 long NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache
137 long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper
138 long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened
139 int NMethodSweeper::_seen = 0; // Nof. nmethods processed so far in the current pass of the CodeCache
140 int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
141 int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
142 int NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep
143
144 volatile bool NMethodSweeper::_should_sweep = true; // Indicates if we should invoke the sweeper
145 volatile int NMethodSweeper::_sweep_fractions_left = 0; // Nof. invocations left until we are completed with this pass
146 volatile int NMethodSweeper::_sweep_started = 0; // Flag to control conc sweeper
147 volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from:
148 // 1) alive -> not_entrant
149 // 2) not_entrant -> zombie
150 // 3) zombie -> marked_for_reclamation
151 int NMethodSweeper::_hotness_counter_reset_val = 0;
152
153 long NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed
154 long NMethodSweeper::_total_nof_c2_methods_reclaimed = 0; // Accumulated nof C2 methods flushed
155 size_t NMethodSweeper::_total_flushed_size = 0; // Total number of bytes flushed from the code cache
156 Tickspan NMethodSweeper::_total_time_sweeping; // Accumulated time sweeping
157 Tickspan NMethodSweeper::_total_time_this_sweep; // Total time this sweep
158 Tickspan NMethodSweeper::_peak_sweep_time; // Peak time for a full sweep
159 Tickspan NMethodSweeper::_peak_sweep_fraction_time; // Peak time sweeping one fraction
160
161
162
163 class MarkActivationClosure: public CodeBlobClosure {
164 public:
165 virtual void do_code_blob(CodeBlob* cb) {
166 if (cb->is_nmethod()) {
167 nmethod* nm = (nmethod*)cb;
168 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
170 // If we see an activation belonging to a not-entrant nmethod, we mark it.
170 if (nm->is_not_entrant()) {
171 nm->mark_as_seen_on_stack();
172 }
173 }
174 }
175 };
176 static MarkActivationClosure mark_activation_closure;
177
178 class SetHotnessClosure: public CodeBlobClosure {
179 public:
180 virtual void do_code_blob(CodeBlob* cb) {
181 if (cb->is_nmethod()) {
182 nmethod* nm = (nmethod*)cb;
183 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
184 }
185 }
186 };
187 static SetHotnessClosure set_hotness_closure;
188
189
190 int NMethodSweeper::hotness_counter_reset_val() {
191 if (_hotness_counter_reset_val == 0) {
192 _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
193 }
194 return _hotness_counter_reset_val;
195 }
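// Worked example for hotness_counter_reset_val() above (illustrative; assumes the 240M
// ReservedCodeCacheSize that is the default with tiered compilation):
//   reset_val = (240M / 1M) * 2 = 480
// The sweeper decrements an nmethod's hotness counter from this value on every pass and a
// stack scan resets it, so a larger code cache tolerates longer idle periods before flushing.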
196 bool NMethodSweeper::sweep_in_progress() {
197 return (_current != NULL);
198 }
199
200 // Scans the stacks of all Java threads and marks activations of not-entrant methods.
201 // No need to synchronize access, since 'mark_active_nmethods' is always executed at a
202 // safepoint.
203 void NMethodSweeper::mark_active_nmethods() {
204 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
205 // If we do not want to reclaim not-entrant or zombie methods there is no need
206 // to scan stacks
207 if (!MethodFlushing) {
208 return;
209 }
210
211 // Increase time so that we can estimate when to invoke the sweeper again.
212 _time_counter++;
213
214 // Check for restart
215 assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
216 if (!sweep_in_progress()) {
217 _seen = 0;
218 _sweep_fractions_left = NmethodSweepFraction;
219 _current = CodeCache::first_nmethod();
220 _traversals += 1;
221 _total_time_this_sweep = Tickspan();
222
223 if (PrintMethodFlushing) {
224 tty->print_cr("### Sweep: stack traversal %ld", _traversals);
225 }
226 Threads::nmethods_do(&mark_activation_closure);
227
228 } else {
229 // Only set hotness counter
230 Threads::nmethods_do(&set_hotness_closure);
231 }
232
233 OrderAccess::storestore();
234 }
235 /**
236 * This function invokes the sweeper if at least one of the three conditions is met:
237 * (1) The code cache is getting full
238 * (2) There are sufficient state changes in/since the last sweep.
239 * (3) We have not been sweeping for 'some time'
254 // the formula considers how much space in the code cache is currently used. Here are
255 // some examples that will (hopefully) help in understanding.
256 //
257 // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since
258 //   the result of the division is 0. This
259 //   keeps the used code cache size small
260 //   (important for embedded Java)
261 // Large ReservedCodeCacheSize: (e.g., 256M + code cache is 10% full). The formula
262 //   computes: (256 / 16) - 1 = 15
263 //   As a result, we invoke the sweeper after
264 //   15 invocations of 'mark_active_nmethods'.
265 // Large ReservedCodeCacheSize: (e.g., 256M + code cache is 90% full). The formula
266 //   computes: (256 / 16) - 10 = 6.
267 if (!_should_sweep) {
268 const int time_since_last_sweep = _time_counter - _last_sweep;
269 // ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time,
270 // since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens using
271 // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
272 // value) that disables the intended periodic sweeps.
273 const int max_wait_time = ReservedCodeCacheSize / (16 * M);
274 double wait_until_next_sweep = max_wait_time - time_since_last_sweep - CodeCache::reverse_free_ratio();
275 assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");
276
277 if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
278 _should_sweep = true;
279 }
280 }
281
282 if (_should_sweep && _sweep_fractions_left > 0) {
283 // Only one thread at a time will sweep
284 jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
285 if (old != 0) {
286 return;
287 }
288 #ifdef ASSERT
289 if (LogSweeper && _records == NULL) {
290 // Create the ring buffer for the logging code
291 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
292 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
293 }
294 #endif
336 _sweep_fractions_left = 1;
337 }
338
339 // We want to visit all nmethods after NmethodSweepFraction
340 // invocations so divide the remaining number of nmethods by the
341 // remaining number of invocations. This is only an estimate since
342 // the number of nmethods changes during the sweep so the final
343 // stage must iterate until there are no more nmethods.
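// Illustrative numbers (assumed, not taken from a real run): with 4096 nmethods in the
// code cache, _seen == 0 and NmethodSweepFraction == 16, the first fraction processes
// roughly 4096 / 16 = 256 nmethods.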
344 int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
345 int swept_count = 0;
346
347
348 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
349 assert(!CodeCache_lock->owned_by_self(), "just checking");
350
351 int freed_memory = 0;
352 {
353 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
354
355 // The last invocation iterates until there are no more nmethods
356 for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) {
357 swept_count++;
358 if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
359 if (PrintMethodFlushing && Verbose) {
360 tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
361 }
362 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
363
364 assert(Thread::current()->is_Java_thread(), "should be java thread");
365 JavaThread* thread = (JavaThread*)Thread::current();
366 ThreadBlockInVM tbivm(thread);
367 thread->java_suspend_self();
368 }
369 // Since we will give up the CodeCache_lock, always skip ahead
370 // to the next nmethod. Other blobs can be deleted by other
371 // threads but nmethods are only reclaimed by the sweeper.
372 nmethod* next = CodeCache::next_nmethod(_current);
373
374 // Now ready to process nmethod and give up CodeCache_lock
375 {
376 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
377 freed_memory += process_nmethod(_current);
378 }
379 _seen++;
380 _current = next;
381 }
382 }
383
384 assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache");
385
386 const Ticks sweep_end_counter = Ticks::now();
387 const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
388 _total_time_sweeping += sweep_time;
389 _total_time_this_sweep += sweep_time;
390 _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
391 _total_flushed_size += freed_memory;
392 _total_nof_methods_reclaimed += _flushed_count;
393
394 EventSweepCodeCache event(UNTIMED);
395 if (event.should_commit()) {
396 event.set_starttime(sweep_start_counter);
397 event.set_endtime(sweep_end_counter);
398 event.set_sweepIndex(_traversals);
399 event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
400 event.set_sweptCount(swept_count);
401 event.set_flushedCount(_flushed_count);
402 event.set_markedCount(_marked_for_reclamation_count);
403 event.set_zombifiedCount(_zombified_count);
404 event.commit();
577 // Clean-up all inline caches that point to zombie/not-entrant methods
578 MutexLocker cl(CompiledIC_lock);
579 nm->cleanup_inline_caches();
580 SWEEP(nm);
581 }
582 return freed_memory;
583 }
584
585
586 void NMethodSweeper::possibly_flush(nmethod* nm) {
587 if (UseCodeCacheFlushing) {
588 if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
589 bool make_not_entrant = false;
590
591 // Do not make native methods and OSR-methods not-entrant
592 nm->dec_hotness_counter();
593 // Get the initial value of the hotness counter. This value depends on the
594 // ReservedCodeCacheSize
595 int reset_val = hotness_counter_reset_val();
596 int time_since_reset = reset_val - nm->hotness_counter();
597 double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
598 // The less free space in the code cache we have, the bigger reverse_free_ratio() is.
599 // I.e., 'threshold' increases with lower available space in the code cache and a higher
600 // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
601 // value until it is reset by stack walking - is smaller than the computed threshold, the
602 // corresponding nmethod is considered for removal.
603 if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > MinPassesBeforeFlush)) {
604 // A method is marked as not-entrant if the method is
605 // 1) 'old enough': nm->hotness_counter() < threshold
606 // 2) The method was in_use for a minimum amount of time: (time_since_reset > MinPassesBeforeFlush)
607 // The second condition is necessary if we are dealing with very small code cache
608 // sizes (e.g., <10M) and the code cache size is too small to hold all hot methods.
609 // The second condition ensures that methods are not immediately made not-entrant
610 // after compilation.
611 make_not_entrant = true;
612 }
613
614 // The stack-scanning low-cost detection may not see the method was used (which can happen for
615 // flat profiles). Check the age counter for possible data.
616 if (UseCodeAging && make_not_entrant && (nm->is_compiled_by_c2() || nm->is_compiled_by_c1())) {
617 MethodCounters* mc = nm->method()->method_counters();
114 }
115
116 void NMethodSweeper::record_sweep(nmethod* nm, int line) {
117 if (_records != NULL) {
118 _records[_sweep_index].traversal = _traversals;
119 _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
120 _records[_sweep_index].invocation = _sweep_fractions_left;
121 _records[_sweep_index].compile_id = nm->compile_id();
122 _records[_sweep_index].kind = nm->compile_kind();
123 _records[_sweep_index].state = nm->_state;
124 _records[_sweep_index].vep = nm->verified_entry_point();
125 _records[_sweep_index].uep = nm->entry_point();
126 _records[_sweep_index].line = line;
127 _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
128 }
129 }
130 #else
131 #define SWEEP(nm)
132 #endif
133
134 NMethodIterator NMethodSweeper::_current; // Current nmethod
135 long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID.
136 long NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache
137 long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper
138 long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened
139 int NMethodSweeper::_seen = 0; // Nof. nmethods processed so far in the current pass of the CodeCache
140 int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
141 int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
142 int NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep
143
144 volatile bool NMethodSweeper::_should_sweep = true; // Indicates if we should invoke the sweeper
145 volatile int NMethodSweeper::_sweep_fractions_left = 0; // Nof. invocations left until we are completed with this pass
146 volatile int NMethodSweeper::_sweep_started = 0; // Flag to control conc sweeper
147 volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from:
148 // 1) alive -> not_entrant
149 // 2) not_entrant -> zombie
150 // 3) zombie -> marked_for_reclamation
151 int NMethodSweeper::_hotness_counter_reset_val = 0;
152
153 long NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed
154 long NMethodSweeper::_total_nof_c2_methods_reclaimed = 0; // Accumulated nof C2 methods flushed
155 size_t NMethodSweeper::_total_flushed_size = 0; // Total number of bytes flushed from the code cache
156 Tickspan NMethodSweeper::_total_time_sweeping; // Accumulated time sweeping
157 Tickspan NMethodSweeper::_total_time_this_sweep; // Total time this sweep
158 Tickspan NMethodSweeper::_peak_sweep_time; // Peak time for a full sweep
159 Tickspan NMethodSweeper::_peak_sweep_fraction_time; // Peak time sweeping one fraction
160
161
162 class MarkActivationClosure: public CodeBlobClosure {
163 public:
164 virtual void do_code_blob(CodeBlob* cb) {
165 assert(cb->is_nmethod(), "CodeBlob should be nmethod");
166 nmethod* nm = (nmethod*)cb;
167 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
169 // If we see an activation belonging to a not-entrant nmethod, we mark it.
169 if (nm->is_not_entrant()) {
170 nm->mark_as_seen_on_stack();
171 }
172 }
173 };
174 static MarkActivationClosure mark_activation_closure;
175
176 class SetHotnessClosure: public CodeBlobClosure {
177 public:
178 virtual void do_code_blob(CodeBlob* cb) {
179 assert(cb->is_nmethod(), "CodeBlob should be nmethod");
180 nmethod* nm = (nmethod*)cb;
181 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
182 }
183 };
184 static SetHotnessClosure set_hotness_closure;
185
186
187 int NMethodSweeper::hotness_counter_reset_val() {
188 if (_hotness_counter_reset_val == 0) {
189 _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
190 }
191 return _hotness_counter_reset_val;
192 }
193 bool NMethodSweeper::sweep_in_progress() {
194 return !_current.end();
195 }
196
197 // Scans the stacks of all Java threads and marks activations of not-entrant methods.
198 // No need to synchronize access, since 'mark_active_nmethods' is always executed at a
199 // safepoint.
200 void NMethodSweeper::mark_active_nmethods() {
201 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
202 // If we do not want to reclaim not-entrant or zombie methods there is no need
203 // to scan stacks
204 if (!MethodFlushing) {
205 return;
206 }
207
208 // Increase time so that we can estimate when to invoke the sweeper again.
209 _time_counter++;
210
211 // Check for restart
212 assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
213 if (!sweep_in_progress()) {
214 _seen = 0;
215 _sweep_fractions_left = NmethodSweepFraction;
216 _current = NMethodIterator();
217 // Initialize to first nmethod
218 _current.next();
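// Note (assumption based on the call pattern here and the old CodeCache::first_nmethod()
// code path): a freshly constructed NMethodIterator is positioned before the first
// nmethod, so the single next() call above advances it to the first entry.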
219 _traversals += 1;
220 _total_time_this_sweep = Tickspan();
221
222 if (PrintMethodFlushing) {
223 tty->print_cr("### Sweep: stack traversal %ld", _traversals);
224 }
225 Threads::nmethods_do(&mark_activation_closure);
226
227 } else {
228 // Only set hotness counter
229 Threads::nmethods_do(&set_hotness_closure);
230 }
231
232 OrderAccess::storestore();
233 }
234 /**
235 * This function invokes the sweeper if at least one of the three conditions is met:
236 * (1) The code cache is getting full
237 * (2) There are sufficient state changes in/since the last sweep.
238 * (3) We have not been sweeping for 'some time'
253 // the formula considers how much space in the code cache is currently used. Here are
254 // some examples that will (hopefully) help in understanding.
255 //
256 // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since
257 //   the result of the division is 0. This
258 //   keeps the used code cache size small
259 //   (important for embedded Java)
260 // Large ReservedCodeCacheSize: (e.g., 256M + code cache is 10% full). The formula
261 //   computes: (256 / 16) - 1 = 15
262 //   As a result, we invoke the sweeper after
263 //   15 invocations of 'mark_active_nmethods'.
264 // Large ReservedCodeCacheSize: (e.g., 256M + code cache is 90% full). The formula
265 //   computes: (256 / 16) - 10 = 6.
266 if (!_should_sweep) {
267 const int time_since_last_sweep = _time_counter - _last_sweep;
268 // ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time,
269 // since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens using
270 // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
271 // value) that disables the intended periodic sweeps.
272 const int max_wait_time = ReservedCodeCacheSize / (16 * M);
273 double wait_until_next_sweep = max_wait_time - time_since_last_sweep -
274 MAX2(CodeCache::reverse_free_ratio(CodeBlobType::MethodProfiled),
275 CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled));
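// With the segmented code cache the sweep urgency is driven by whichever method heap
// (profiled or non-profiled) is closer to being full: MAX2 picks the larger reverse
// free ratio, so the fuller heap shortens the wait until the next sweep.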
276 assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");
277
278 if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
279 _should_sweep = true;
280 }
281 }
282
283 if (_should_sweep && _sweep_fractions_left > 0) {
284 // Only one thread at a time will sweep
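// Atomic::cmpxchg returns the previous value of _sweep_started; if it was already 1,
// another thread has claimed this sweep fraction and we back off below.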
285 jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
286 if (old != 0) {
287 return;
288 }
289 #ifdef ASSERT
290 if (LogSweeper && _records == NULL) {
291 // Create the ring buffer for the logging code
292 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
293 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
294 }
295 #endif
337 _sweep_fractions_left = 1;
338 }
339
340 // We want to visit all nmethods after NmethodSweepFraction
341 // invocations so divide the remaining number of nmethods by the
342 // remaining number of invocations. This is only an estimate since
343 // the number of nmethods changes during the sweep so the final
344 // stage must iterate until there are no more nmethods.
345 int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
346 int swept_count = 0;
347
348
349 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
350 assert(!CodeCache_lock->owned_by_self(), "just checking");
351
352 int freed_memory = 0;
353 {
354 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
355
356 // The last invocation iterates until there are no more nmethods
357 while ((swept_count < todo || _sweep_fractions_left == 1) && !_current.end()) {
358 swept_count++;
359 if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
360 if (PrintMethodFlushing && Verbose) {
361 tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
362 }
363 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
364
365 assert(Thread::current()->is_Java_thread(), "should be java thread");
366 JavaThread* thread = (JavaThread*)Thread::current();
367 ThreadBlockInVM tbivm(thread);
368 thread->java_suspend_self();
369 }
370 // Since we will give up the CodeCache_lock, always skip ahead
371 // to the next nmethod. Other blobs can be deleted by other
372 // threads but nmethods are only reclaimed by the sweeper.
373 nmethod* nm = _current.method();
374 _current.next();
375
376 // Now ready to process nmethod and give up CodeCache_lock
377 {
378 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
379 freed_memory += process_nmethod(nm);
380 }
381 _seen++;
382 }
383 }
384
385 assert(_sweep_fractions_left > 1 || _current.end(), "must have scanned the whole cache");
386
387 const Ticks sweep_end_counter = Ticks::now();
388 const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
389 _total_time_sweeping += sweep_time;
390 _total_time_this_sweep += sweep_time;
391 _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
392 _total_flushed_size += freed_memory;
393 _total_nof_methods_reclaimed += _flushed_count;
394
395 EventSweepCodeCache event(UNTIMED);
396 if (event.should_commit()) {
397 event.set_starttime(sweep_start_counter);
398 event.set_endtime(sweep_end_counter);
399 event.set_sweepIndex(_traversals);
400 event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
401 event.set_sweptCount(swept_count);
402 event.set_flushedCount(_flushed_count);
403 event.set_markedCount(_marked_for_reclamation_count);
404 event.set_zombifiedCount(_zombified_count);
405 event.commit();
578 // Clean-up all inline caches that point to zombie/not-entrant methods
579 MutexLocker cl(CompiledIC_lock);
580 nm->cleanup_inline_caches();
581 SWEEP(nm);
582 }
583 return freed_memory;
584 }
585
586
587 void NMethodSweeper::possibly_flush(nmethod* nm) {
588 if (UseCodeCacheFlushing) {
589 if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
590 bool make_not_entrant = false;
591
592 // Do not make native methods and OSR-methods not-entrant
593 nm->dec_hotness_counter();
594 // Get the initial value of the hotness counter. This value depends on the
595 // ReservedCodeCacheSize
596 int reset_val = hotness_counter_reset_val();
597 int time_since_reset = reset_val - nm->hotness_counter();
598 int code_blob_type = (CodeCache::get_code_blob_type(nm->comp_level()));
599 double threshold = -reset_val + (CodeCache::reverse_free_ratio(code_blob_type) * NmethodSweepActivity);
600 // The less free space in the code cache we have, the bigger reverse_free_ratio() is.
601 // I.e., 'threshold' increases with lower available space in the code cache and a higher
602 // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
603 // value until it is reset by stack walking - is smaller than the computed threshold, the
604 // corresponding nmethod is considered for removal.
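// Worked example (illustrative; all numbers assumed): reset_val == 480,
// NmethodSweepActivity == 10 and the nmethod's code heap about 80% full, so
// reverse_free_ratio(code_blob_type) is roughly 5:
//   threshold = -480 + (5 * 10) = -430
// Only an nmethod whose hotness counter has decayed below -430, i.e. one that has not
// been seen on a stack for many sweeper passes, becomes a candidate here.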
605 if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > MinPassesBeforeFlush)) {
606 // A method is marked as not-entrant if the method is
607 // 1) 'old enough': nm->hotness_counter() < threshold
608 // 2) The method was in_use for a minimum amount of time: (time_since_reset > MinPassesBeforeFlush)
609 // The second condition is necessary if we are dealing with very small code cache
610 // sizes (e.g., <10M) and the code cache size is too small to hold all hot methods.
611 // The second condition ensures that methods are not immediately made not-entrant
612 // after compilation.
613 make_not_entrant = true;
614 }
615
616 // The stack-scanning low-cost detection may not see the method was used (which can happen for
617 // flat profiles). Check the age counter for possible data.
618 if (UseCodeAging && make_not_entrant && (nm->is_compiled_by_c2() || nm->is_compiled_by_c1())) {
619 MethodCounters* mc = nm->method()->method_counters();