  assert((nextAddr == end_addr) ||
         isUnmarked(nextAddr), "get_next_zero postcondition");
  return nextAddr;
}

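// The map is all clear if no marked bit is found between the first and
// last covered words.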
inline bool CMSBitMap::isAllClear() const {
  assert_locked();
  return getNextMarkedWordAddress(startWord()) >= endWord();
}

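// Apply the closure to each set bit in the intersection of [left, right)
// and the region covered by the bit map.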
inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
                               HeapWord* right) {
  assert_locked();
  left = MAX2(_bmStartWord, left);
  right = MIN2(_bmStartWord + _bmWordSize, right);
  if (right > left) {
    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
  }
}

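// The incremental-mode (iCMS) operations below delegate to the CMS thread;
// each is a no-op unless CMSIncrementalMode is enabled.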
inline void CMSCollector::start_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::start_icms();
  }
}

inline void CMSCollector::stop_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::stop_icms();
  }
}

inline void CMSCollector::disable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::disable_icms();
  }
}

inline void CMSCollector::enable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::enable_icms();
  }
}

inline void CMSCollector::icms_wait() {
  if (CMSIncrementalMode) {
    cmsThread()->icms_wait();
  }
}

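// Record the sweep limit of the CMS generation's space, so a subsequent
// sweep knows how far allocation had progressed.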
inline void CMSCollector::save_sweep_limits() {
  _cmsGen->save_sweep_limit();
}

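// An object is considered dead if we are unloading classes, the collector
// is in the Sweeping state, and the object is not marked in the mark bit map.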
inline bool CMSCollector::is_dead_obj(oop obj) const {
  HeapWord* addr = (HeapWord*)obj;
  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
          && _cmsGen->cmsSpace()->block_is_obj(addr)),
         "must be object");
  return should_unload_classes() &&
         _collectorState == Sweeping &&
         !_markBitMap.isMarked(addr);
}

inline bool CMSCollector::should_abort_preclean() const {
  // We are in the midst of an "abortable preclean" and either
  // scavenge is done or foreground GC wants to take over collection
  return _collectorState == AbortablePreclean &&
         (_abort_preclean || _foregroundGCIsActive ||
          GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
}

inline void CMSStats::record_cms_begin() {
  _cms_timer.stop();

  // This is just an approximate value, but is good enough.
  _cms_used_at_cms_begin = _cms_used_at_gc0_end;

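  // exp_avg(avg, sample, w) returns a weighted average that gives the new
  // sample a weight of w percent.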
  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
                                                 (float)_cms_timer.seconds(), _cms_alpha);
  _cms_begin_time.update();

  _cms_timer.reset();
  _cms_timer.start();
}

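// Fold the duration of the cycle that just finished (and its duration per
// MB of occupied CMS space) into the running averages, restore the saved
// alpha, and mark the CMS statistics valid.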
inline void CMSStats::record_cms_end() {
  _cms_timer.stop();

  float cur_duration = _cms_timer.seconds();
  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
                                                   cur_duration, _cms_alpha);

  // Avoid division by 0.
  const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
  _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
                                                          cur_duration / cms_used_mb,
                                                          _cms_alpha);

  _cms_end_time.update();
  _cms_alpha = _saved_alpha;
  _allow_duty_cycle_reduction = true;
  _valid_bits |= _CMS_VALID;

  _cms_timer.start();
}

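// Elapsed wall-clock seconds since the most recent CMS cycle began and
// ended, respectively.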
inline double CMSStats::cms_time_since_begin() const {
  return _cms_begin_time.seconds();
}

inline double CMSStats::cms_time_since_end() const {
  return _cms_end_time.seconds();
}

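// The rates below are averages taken over the young-gen (gc0) period:
// bytes promoted into the CMS generation per second, bytes allocated
// directly in it per second, and the sum of the two, i.e. the overall
// rate at which CMS space is consumed.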
inline double CMSStats::promotion_rate() const {
  assert(valid(), "statistics not valid yet");
  return gc0_promoted() / gc0_period();
}

inline double CMSStats::cms_allocation_rate() const {
  assert(valid(), "statistics not valid yet");
  return cms_allocated() / gc0_period();
}

inline double CMSStats::cms_consumption_rate() const {
  assert(valid(), "statistics not valid yet");
  return (gc0_promoted() + cms_allocated()) / gc0_period();
}

inline unsigned int CMSStats::icms_update_duty_cycle() {
  // Update the duty cycle only if pacing is enabled and the stats are valid
  // (after at least one young gen gc and one cms cycle have completed).
  if (CMSIncrementalPacing && valid()) {
    return icms_update_duty_cycle_impl();
  }
  return _icms_duty_cycle;
}

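// The generation delegates its space accounting to the underlying
// compactible free list space.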
inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
  cmsSpace()->save_sweep_limit();
}

inline size_t ConcurrentMarkSweepGeneration::capacity() const {
  return _cmsSpace->capacity();
}

inline size_t ConcurrentMarkSweepGeneration::used() const {
  return _cmsSpace->used();
}

inline size_t ConcurrentMarkSweepGeneration::free() const {
  return _cmsSpace->free();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
  return _cmsSpace->used_region();
}