< prev index next >

src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp

Print this page
rev 7209 : [mq]: inccms


 217   assert((nextAddr == end_addr) ||
 218           isUnmarked(nextAddr), "get_next_zero postcondition");
 219   return nextAddr;
 220 }
 221 
 222 inline bool CMSBitMap::isAllClear() const {
 223   assert_locked();
 224   return getNextMarkedWordAddress(startWord()) >= endWord();
 225 }
 226 
 227 inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
 228                             HeapWord* right) {
 229   assert_locked();
 230   left = MAX2(_bmStartWord, left);
 231   right = MIN2(_bmStartWord + _bmWordSize, right);
 232   if (right > left) {
 233     _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
 234   }
 235 }
 236 
 237 inline void CMSCollector::start_icms() {
 238   if (CMSIncrementalMode) {
 239     ConcurrentMarkSweepThread::start_icms();
 240   }
 241 }
 242 
 243 inline void CMSCollector::stop_icms() {
 244   if (CMSIncrementalMode) {
 245     ConcurrentMarkSweepThread::stop_icms();
 246   }
 247 }
 248 
 249 inline void CMSCollector::disable_icms() {
 250   if (CMSIncrementalMode) {
 251     ConcurrentMarkSweepThread::disable_icms();
 252   }
 253 }
 254 
 255 inline void CMSCollector::enable_icms() {
 256   if (CMSIncrementalMode) {
 257     ConcurrentMarkSweepThread::enable_icms();
 258   }
 259 }
 260 
 261 inline void CMSCollector::icms_wait() {
 262   if (CMSIncrementalMode) {
 263     cmsThread()->icms_wait();
 264   }
 265 }
 266 
// Record the sweep limit for the (single) CMS generation; simple
// delegation to ConcurrentMarkSweepGeneration::save_sweep_limit().
 267 inline void CMSCollector::save_sweep_limits() {
 268   _cmsGen->save_sweep_limit();
 269 }
 270 
 271 inline bool CMSCollector::is_dead_obj(oop obj) const {
 272   HeapWord* addr = (HeapWord*)obj;
 273   assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
 274           && _cmsGen->cmsSpace()->block_is_obj(addr)),
 275          "must be object");
 276   return  should_unload_classes() &&
 277           _collectorState == Sweeping &&
 278          !_markBitMap.isMarked(addr);
 279 }
 280 
 281 inline bool CMSCollector::should_abort_preclean() const {
 282   // We are in the midst of an "abortable preclean" and either
 283   // scavenge is done or foreground GC wants to take over collection
 284   return _collectorState == AbortablePreclean &&
 285          (_abort_preclean || _foregroundGCIsActive ||
 286           GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));


 381   return _cms_begin_time.seconds();
 382 }
 383 
// Time since the end of the last cms cycle, in seconds, as reported
// by the _cms_end_time timestamp.
 384 inline double CMSStats::cms_time_since_end() const {
 385   return _cms_end_time.seconds();
 386 }
 387 
 388 inline double CMSStats::promotion_rate() const {
 389   assert(valid(), "statistics not valid yet");
 390   return gc0_promoted() / gc0_period();
 391 }
 392 
 393 inline double CMSStats::cms_allocation_rate() const {
 394   assert(valid(), "statistics not valid yet");
 395   return cms_allocated() / gc0_period();
 396 }
 397 
 398 inline double CMSStats::cms_consumption_rate() const {
 399   assert(valid(), "statistics not valid yet");
 400   return (gc0_promoted() + cms_allocated()) / gc0_period();
 401 }
 402 
 403 inline unsigned int CMSStats::icms_update_duty_cycle() {
 404   // Update the duty cycle only if pacing is enabled and the stats are valid
 405   // (after at least one young gen gc and one cms cycle have completed).
 406   if (CMSIncrementalPacing && valid()) {
 407     return icms_update_duty_cycle_impl();
 408   }
 409   return _icms_duty_cycle;
 410 }
 411 
// Delegate to the underlying cms space to record its sweep limit.
 412 inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
 413   cmsSpace()->save_sweep_limit();
 414 }
 415 
// Capacity of the generation, as reported by its cms space.
 416 inline size_t ConcurrentMarkSweepGeneration::capacity() const {
 417   return _cmsSpace->capacity();
 418 }
 419 
// Used portion of the generation, as reported by its cms space.
 420 inline size_t ConcurrentMarkSweepGeneration::used() const {
 421   return _cmsSpace->used();
 422 }
 423 
// Free portion of the generation, as reported by its cms space.
 424 inline size_t ConcurrentMarkSweepGeneration::free() const {
 425   return _cmsSpace->free();
 426 }
 427 
 428 inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
 429   return _cmsSpace->used_region();




 217   assert((nextAddr == end_addr) ||
 218           isUnmarked(nextAddr), "get_next_zero postcondition");
 219   return nextAddr;
 220 }
 221 
// Returns true iff no bit is set in the map: there is no marked word
// address before the end of the covered region.
 222 inline bool CMSBitMap::isAllClear() const {
 223   assert_locked();
 224   return getNextMarkedWordAddress(startWord()) >= endWord();
 225 }
 226 
// Apply the closure to each set bit whose word address lies in the
// intersection of [left, right) with the region covered by this map;
// an empty intersection does no work.
 227 inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
 228                             HeapWord* right) {
 229   assert_locked();
 230   left = MAX2(_bmStartWord, left);    // clamp to covered region
 231   right = MIN2(_bmStartWord + _bmWordSize, right);
 232   if (right > left) {
 233     _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
 234   }
 235 }
 236 






























// Record the sweep limit for the (single) CMS generation; simple
// delegation to ConcurrentMarkSweepGeneration::save_sweep_limit().
 237 inline void CMSCollector::save_sweep_limits() {
 238   _cmsGen->save_sweep_limit();
 239 }
 240 
// An object is treated as dead only during the Sweeping phase of a
// cycle that unloads classes, and only when its bit in the mark bit
// map is clear.
 241 inline bool CMSCollector::is_dead_obj(oop obj) const {
 242   HeapWord* addr = (HeapWord*)obj;
 243   assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
 244           && _cmsGen->cmsSpace()->block_is_obj(addr)),
 245          "must be object");
 246   return  should_unload_classes() &&
 247           _collectorState == Sweeping &&
 248          !_markBitMap.isMarked(addr);
 249 }
 250 
 251 inline bool CMSCollector::should_abort_preclean() const {
 252   // We are in the midst of an "abortable preclean" and either
 253   // scavenge is done or foreground GC wants to take over collection
 254   return _collectorState == AbortablePreclean &&
 255          (_abort_preclean || _foregroundGCIsActive ||
 256           GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));


 351   return _cms_begin_time.seconds();
 352 }
 353 
// Time since the end of the last cms cycle, in seconds, as reported
// by the _cms_end_time timestamp.
 354 inline double CMSStats::cms_time_since_end() const {
 355   return _cms_end_time.seconds();
 356 }
 357 
// Average rate of promotion into the cms generation, normalized by
// the young-gc period; only meaningful once the statistics are valid.
 358 inline double CMSStats::promotion_rate() const {
 359   assert(valid(), "statistics not valid yet");
 360   return gc0_promoted() / gc0_period();
 361 }
 362 
// Average rate of direct allocation in the cms generation, normalized
// by the young-gc period; only meaningful once the statistics are valid.
 363 inline double CMSStats::cms_allocation_rate() const {
 364   assert(valid(), "statistics not valid yet");
 365   return cms_allocated() / gc0_period();
 366 }
 367 
// Combined rate at which the cms generation fills: promotions plus
// direct allocations, normalized by the young-gc period.
 368 inline double CMSStats::cms_consumption_rate() const {
 369   assert(valid(), "statistics not valid yet");
 370   return (gc0_promoted() + cms_allocated()) / gc0_period();









 371 }
 372 
// Delegate to the underlying cms space to record its sweep limit.
 373 inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
 374   cmsSpace()->save_sweep_limit();
 375 }
 376 
// Capacity of the generation, as reported by its cms space.
 377 inline size_t ConcurrentMarkSweepGeneration::capacity() const {
 378   return _cmsSpace->capacity();
 379 }
 380 
// Used portion of the generation, as reported by its cms space.
 381 inline size_t ConcurrentMarkSweepGeneration::used() const {
 382   return _cmsSpace->used();
 383 }
 384 
// Free portion of the generation, as reported by its cms space.
 385 inline size_t ConcurrentMarkSweepGeneration::free() const {
 386   return _cmsSpace->free();
 387 }
 388 
 389 inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
 390   return _cmsSpace->used_region();


< prev index next >