< prev index next >

src/hotspot/share/gc/parallel/psParallelCompact.hpp

Print this page




 519 }
 520 
// Pack a new destination count and the current live-object size into the
// single _dc_and_los word: the count goes in the bits above dc_shift, the
// live size stays in the low (los_mask) bits.  Plain store, not atomic —
// callers must ensure no concurrent updates to this region.
inline void
ParallelCompactData::RegionData::set_destination_count(uint count)
{
  // dc_completed >> dc_shift is the largest value the count field can hold.
  assert(count <= (dc_completed >> dc_shift), "count too large");
  const region_sz_t live_sz = (region_sz_t) live_obj_size();
  _dc_and_los = (count << dc_shift) | live_sz;
}
 528 
// Overwrite the live-object size bits of _dc_and_los while preserving the
// raw destination-count bits.  Plain store, not atomic.
inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words)
{
  assert(words <= los_mask, "would overflow");
  _dc_and_los = destination_count_raw() | (region_sz_t)words;
}
 534 
// Atomically decrement the destination-count field of _dc_and_los by one.
// dc_mask has every count bit set, so adding it makes the count field wrap
// (unsigned overflow) to count - 1 while leaving the low live-size bits
// untouched — assumes the dc field occupies the topmost bits of the word;
// confirm against the field-layout declaration.
inline void ParallelCompactData::RegionData::decrement_destination_count()
{
  assert(_dc_and_los < dc_claimed, "already claimed");
  assert(_dc_and_los >= dc_one, "count would go negative");
  Atomic::add(dc_mask, &_dc_and_los);  // add(add_value, dest) argument order.
}
 541 
 542 inline HeapWord* ParallelCompactData::RegionData::data_location() const
 543 {
 544   DEBUG_ONLY(return _data_location;)
 545   NOT_DEBUG(return NULL;)
 546 }
 547 
 548 inline HeapWord* ParallelCompactData::RegionData::highest_ref() const
 549 {
 550   DEBUG_ONLY(return _highest_ref;)
 551   NOT_DEBUG(return NULL;)
 552 }
 553 
 554 inline void ParallelCompactData::RegionData::set_data_location(HeapWord* addr)
 555 {
 556   DEBUG_ONLY(_data_location = addr;)
 557 }
 558 
// Mark the region done: replace the claimed/count bits with dc_completed
// while keeping the live-object size.  Plain store; the caller must hold
// the claim (asserted below).
inline void ParallelCompactData::RegionData::set_completed()
{
  assert(claimed(), "must be claimed first");
  _dc_and_los = dc_completed | (region_sz_t) live_obj_size();
}
 564 
 565 // MT-unsafe claiming of a region.  Should only be used during single threaded
 566 // execution.
 567 inline bool ParallelCompactData::RegionData::claim_unsafe()
 568 {
 569   if (available()) {
 570     _dc_and_los |= dc_claimed;
 571     return true;
 572   }
 573   return false;
 574 }
 575 
// Atomically add 'words' to the live-object size field.  The assert ensures
// the sum stays within los_mask so the addition cannot carry into the
// destination-count bits.
inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
{
  assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
  Atomic::add(static_cast<region_sz_t>(words), &_dc_and_los);
}
 581 
// Debug-only: raise _highest_ref to 'addr' if addr is higher, using a CAS
// loop so racing threads can never lower the recorded maximum.  No-op in
// product builds.
inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
{
#ifdef ASSERT
  HeapWord* tmp = _highest_ref;
  while (addr > tmp) {
    // cmpxchg(exchange_value, dest, compare_value); on failure tmp becomes
    // the current value and the loop re-tests whether addr still wins.
    tmp = Atomic::cmpxchg(addr, &_highest_ref, tmp);
  }
#endif  // #ifdef ASSERT
}
 591 
// Atomically claim the region: the CAS succeeds only if _dc_and_los still
// equals the bare live size (count bits zero, not yet claimed).  Returns
// true iff this thread won the claim.
inline bool ParallelCompactData::RegionData::claim()
{
  const region_sz_t los = static_cast<region_sz_t>(live_obj_size());
  // cmpxchg(exchange_value, dest, compare_value) returns the prior value.
  const region_sz_t old = Atomic::cmpxchg(dc_claimed | los, &_dc_and_los, los);
  return old == los;
}
 598 
 599 inline ParallelCompactData::RegionData*
 600 ParallelCompactData::region(size_t region_idx) const
 601 {
 602   assert(region_idx <= region_count(), "bad arg");
 603   return _region_data + region_idx;
 604 }
 605 
 606 inline size_t
 607 ParallelCompactData::region(const RegionData* const region_ptr) const
 608 {
 609   assert(region_ptr >= _region_data, "bad arg");
 610   assert(region_ptr <= _region_data + region_count(), "bad arg");
 611   return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
 612 }
 613 
 614 inline ParallelCompactData::BlockData*
 615 ParallelCompactData::block(size_t n) const {




 519 }
 520 
// Pack a new destination count and the current live-object size into the
// single _dc_and_los word: the count goes in the bits above dc_shift, the
// live size stays in the low (los_mask) bits.  Plain store, not atomic —
// callers must ensure no concurrent updates to this region.
inline void
ParallelCompactData::RegionData::set_destination_count(uint count)
{
  // dc_completed >> dc_shift is the largest value the count field can hold.
  assert(count <= (dc_completed >> dc_shift), "count too large");
  const region_sz_t live_sz = (region_sz_t) live_obj_size();
  _dc_and_los = (count << dc_shift) | live_sz;
}
 528 
// Overwrite the live-object size bits of _dc_and_los while preserving the
// raw destination-count bits.  Plain store, not atomic.
inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words)
{
  assert(words <= los_mask, "would overflow");
  _dc_and_los = destination_count_raw() | (region_sz_t)words;
}
 534 
// Atomically decrement the destination-count field of _dc_and_los by one.
// dc_mask has every count bit set, so adding it makes the count field wrap
// (unsigned overflow) to count - 1 while leaving the low live-size bits
// untouched — assumes the dc field occupies the topmost bits of the word;
// confirm against the field-layout declaration.
inline void ParallelCompactData::RegionData::decrement_destination_count()
{
  assert(_dc_and_los < dc_claimed, "already claimed");
  assert(_dc_and_los >= dc_one, "count would go negative");
  Atomic::add(&_dc_and_los, dc_mask);  // add(dest, add_value) argument order.
}
 541 
 542 inline HeapWord* ParallelCompactData::RegionData::data_location() const
 543 {
 544   DEBUG_ONLY(return _data_location;)
 545   NOT_DEBUG(return NULL;)
 546 }
 547 
 548 inline HeapWord* ParallelCompactData::RegionData::highest_ref() const
 549 {
 550   DEBUG_ONLY(return _highest_ref;)
 551   NOT_DEBUG(return NULL;)
 552 }
 553 
 554 inline void ParallelCompactData::RegionData::set_data_location(HeapWord* addr)
 555 {
 556   DEBUG_ONLY(_data_location = addr;)
 557 }
 558 
// Mark the region done: replace the claimed/count bits with dc_completed
// while keeping the live-object size.  Plain store; the caller must hold
// the claim (asserted below).
inline void ParallelCompactData::RegionData::set_completed()
{
  assert(claimed(), "must be claimed first");
  _dc_and_los = dc_completed | (region_sz_t) live_obj_size();
}
 564 
 565 // MT-unsafe claiming of a region.  Should only be used during single threaded
 566 // execution.
 567 inline bool ParallelCompactData::RegionData::claim_unsafe()
 568 {
 569   if (available()) {
 570     _dc_and_los |= dc_claimed;
 571     return true;
 572   }
 573   return false;
 574 }
 575 
// Atomically add 'words' to the live-object size field.  The assert ensures
// the sum stays within los_mask so the addition cannot carry into the
// destination-count bits.
inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
{
  assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
  Atomic::add(&_dc_and_los, static_cast<region_sz_t>(words));
}
 581 
// Debug-only: raise _highest_ref to 'addr' if addr is higher, using a CAS
// loop so racing threads can never lower the recorded maximum.  No-op in
// product builds.
inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
{
#ifdef ASSERT
  HeapWord* tmp = _highest_ref;
  while (addr > tmp) {
    // cmpxchg(dest, compare_value, exchange_value); on failure tmp becomes
    // the current value and the loop re-tests whether addr still wins.
    tmp = Atomic::cmpxchg(&_highest_ref, tmp, addr);
  }
#endif  // #ifdef ASSERT
}
 591 
// Atomically claim the region: the CAS succeeds only if _dc_and_los still
// equals the bare live size (count bits zero, not yet claimed).  Returns
// true iff this thread won the claim.
inline bool ParallelCompactData::RegionData::claim()
{
  const region_sz_t los = static_cast<region_sz_t>(live_obj_size());
  // cmpxchg(dest, compare_value, exchange_value) returns the prior value.
  const region_sz_t old = Atomic::cmpxchg(&_dc_and_los, los, dc_claimed | los);
  return old == los;
}
 598 
 599 inline ParallelCompactData::RegionData*
 600 ParallelCompactData::region(size_t region_idx) const
 601 {
 602   assert(region_idx <= region_count(), "bad arg");
 603   return _region_data + region_idx;
 604 }
 605 
 606 inline size_t
 607 ParallelCompactData::region(const RegionData* const region_ptr) const
 608 {
 609   assert(region_ptr >= _region_data, "bad arg");
 610   assert(region_ptr <= _region_data + region_count(), "bad arg");
 611   return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
 612 }
 613 
 614 inline ParallelCompactData::BlockData*
 615 ParallelCompactData::block(size_t n) const {


< prev index next >