src/hotspot/share/gc/shared/oopStorage.cpp

--- old/src/hotspot/share/gc/shared/oopStorage.cpp

 127 }
 128 
 129 void OopStorage::ActiveArray::destroy(ActiveArray* ba) {
 130   ba->~ActiveArray();
 131   FREE_C_HEAP_ARRAY(char, ba);
 132 }
 133 
 134 size_t OopStorage::ActiveArray::size() const {
 135   return _size;
 136 }
 137 
 138 size_t OopStorage::ActiveArray::block_count() const {
 139   return _block_count;
 140 }
 141 
 142 size_t OopStorage::ActiveArray::block_count_acquire() const {
 143   return Atomic::load_acquire(&_block_count);
 144 }
 145 
 146 void OopStorage::ActiveArray::increment_refcount() const {
 147   int new_value = Atomic::add(1, &_refcount);
 148   assert(new_value >= 1, "negative refcount %d", new_value - 1);
 149 }
 150 
 151 bool OopStorage::ActiveArray::decrement_refcount() const {
 152   int new_value = Atomic::sub(1, &_refcount);
 153   assert(new_value >= 0, "negative refcount %d", new_value);
 154   return new_value == 0;
 155 }
 156 
 157 bool OopStorage::ActiveArray::push(Block* block) {
 158   size_t index = _block_count;
 159   if (index < _size) {
 160     block->set_active_index(index);
 161     *block_ptr(index) = block;
 162     // Use a release_store to ensure all the setup is complete before
 163     // making the block visible.
 164     Atomic::release_store(&_block_count, index + 1);
 165     return true;
 166   } else {
 167     return false;
 168   }
 169 }
 170 
 171 void OopStorage::ActiveArray::remove(Block* block) {
 172   assert(_block_count > 0, "array is empty");


 290 }
 291 
 292 size_t OopStorage::Block::active_index_safe(const Block* block) {
 293   STATIC_ASSERT(sizeof(intptr_t) == sizeof(block->_active_index));
 294   assert(CanUseSafeFetchN(), "precondition");
 295   return SafeFetchN((intptr_t*)&block->_active_index, 0);
 296 }
 297 
 298 unsigned OopStorage::Block::get_index(const oop* ptr) const {
 299   assert(contains(ptr), PTR_FORMAT " not in block " PTR_FORMAT, p2i(ptr), p2i(this));
 300   return static_cast<unsigned>(ptr - get_pointer(0));
 301 }
 302 
 303 oop* OopStorage::Block::allocate() {
 304   // Use CAS loop because release may change bitmask outside of lock.
 305   uintx allocated = allocated_bitmask();
 306   while (true) {
 307     assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
 308     unsigned index = count_trailing_zeros(~allocated);
 309     uintx new_value = allocated | bitmask_for_index(index);
 310     uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, allocated);
 311     if (fetched == allocated) {
 312       return get_pointer(index); // CAS succeeded; return entry for index.
 313     }
 314     allocated = fetched;       // CAS failed; retry with latest value.
 315   }
 316 }
 317 
 318 OopStorage::Block* OopStorage::Block::new_block(const OopStorage* owner) {
 319   // _data must be first member: aligning block => aligning _data.
 320   STATIC_ASSERT(_data_pos == 0);
 321   size_t size_needed = allocation_size();
 322   void* memory = NEW_C_HEAP_ARRAY_RETURN_NULL(char, size_needed, mtGC);
 323   if (memory == NULL) {
 324     return NULL;
 325   }
 326   void* block_mem = align_up(memory, block_alignment);
 327   assert(sizeof(Block) + pointer_delta(block_mem, memory, 1) <= size_needed,
 328          "allocated insufficient space for aligned block");
 329   return ::new (block_mem) Block(owner, memory);
 330 }


 578     LogStream ls(lt);
 579     if (is_full_bitmask(old_allocated)) {
 580       ls.print_cr("%s: block not full " PTR_FORMAT, owner->name(), p2i(block));
 581     }
 582     if (releasing == old_allocated) {
 583       ls.print_cr("%s: block empty " PTR_FORMAT, owner->name(), p2i(block));
 584     }
 585   }
 586 }
 587 
 588 void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
 589   assert(releasing != 0, "precondition");
 590   // Prevent empty block deletion when transitioning to empty.
 591   Atomic::inc(&_release_refcount);
 592 
 593   // Atomically update allocated bitmask.
 594   uintx old_allocated = _allocated_bitmask;
 595   while (true) {
 596     assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
 597     uintx new_value = old_allocated ^ releasing;
 598     uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, old_allocated);
 599     if (fetched == old_allocated) break; // Successful update.
 600     old_allocated = fetched;             // Retry with updated bitmask.
 601   }
 602 
 603   // Now that the bitmask has been updated, if we have a state transition
 604   // (updated bitmask is empty or old bitmask was full), atomically push
 605   // this block onto the deferred updates list.  Some future call to
 606   // reduce_deferred_updates will make any needed changes related to this
 607   // block and _allocation_list.  This deferral avoids _allocation_list
 608   // updates and the associated locking here.
 609   if ((releasing == old_allocated) || is_full_bitmask(old_allocated)) {
 610     // Log transitions.  Both transitions are possible in a single update.
 611     log_release_transitions(releasing, old_allocated, owner, this);
 612     // Attempt to claim responsibility for adding this block to the deferred
 613     // list, by setting the link to non-NULL by self-looping.  If this fails,
 614     // then someone else has made such a claim and the deferred update has not
 615     // yet been processed and will include our change, so we don't need to do
 616     // anything further.
 617     if (Atomic::replace_if_null(this, &_deferred_updates_next)) {
 618       // Successfully claimed.  Push, with self-loop for end-of-list.
 619       Block* head = owner->_deferred_updates;
 620       while (true) {
 621         _deferred_updates_next = (head == NULL) ? this : head;
 622         Block* fetched = Atomic::cmpxchg(this, &owner->_deferred_updates, head);
 623         if (fetched == head) break; // Successful update.
 624         head = fetched;             // Retry with updated head.
 625       }
 626       // Only request cleanup for to-empty transitions, not for from-full.
 627       // There isn't any rush to process from-full transitions.  Allocation
 628       // will reduce deferrals before allocating new blocks, so may process
 629       // some.  And the service thread will drain the entire deferred list
 630       // if there are any pending to-empty transitions.
 631       if (releasing == old_allocated) {
 632         owner->record_needs_cleanup();
 633       }
 634       log_trace(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
 635                                     owner->name(), p2i(this));
 636     }
 637   }
 638   // Release hold on empty block deletion.
 639   Atomic::dec(&_release_refcount);
 640 }
 641 
 642 // Process one available deferred update.  Returns true if one was processed.
 643 bool OopStorage::reduce_deferred_updates() {
 644   assert_lock_strong(_allocation_mutex);
 645   // Atomically pop a block off the list, if any available.
 646   // No ABA issue because this is only called by one thread at a time.
 647   // The atomicity is with respect to pushes by release().
 648   Block* block = Atomic::load_acquire(&_deferred_updates);
 649   while (true) {
 650     if (block == NULL) return false;
 651     // Try atomic pop of block from list.
 652     Block* tail = block->deferred_updates_next();
 653     if (block == tail) tail = NULL; // Handle self-loop end marker.
 654     Block* fetched = Atomic::cmpxchg(tail, &_deferred_updates, block);
 655     if (fetched == block) break; // Update successful.
 656     block = fetched;             // Retry with updated block.
 657   }
 658   block->set_deferred_updates_next(NULL); // Clear tail after updating head.
 659   // Ensure bitmask read after pop is complete, including clearing tail, for
 660   // ordering with release().  Without this, we may be processing a stale
 661   // bitmask state here while blocking a release() operation from recording
 662   // the deferred update needed for its bitmask change.
 663   OrderAccess::fence();
 664   // Process popped block.
 665   uintx allocated = block->allocated_bitmask();
 666 
 667   // Make membership in list consistent with bitmask state.
 668   if ((_allocation_list.ctail() != NULL) &&
 669       ((_allocation_list.ctail() == block) ||
 670        (_allocation_list.next(*block) != NULL))) {
 671     // Block is in the _allocation_list.
 672     assert(!is_full_bitmask(allocated), "invariant");
 673   } else if (!is_full_bitmask(allocated)) {
 674     // Block is not in the _allocation_list, but now should be.


 707     Block* block = find_block_or_null(ptrs[i]);
 708     assert(block != NULL, "%s: invalid release " PTR_FORMAT, name(), p2i(ptrs[i]));
 709     log_trace(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptrs[i]));
 710     size_t count = 0;
 711     uintx releasing = 0;
 712     for ( ; i < size; ++i) {
 713       const oop* entry = ptrs[i];
 714       check_release_entry(entry);
 715       // If entry not in block, finish block and resume outer loop with entry.
 716       if (!block->contains(entry)) break;
 717       // Add entry to releasing bitmap.
 718       log_trace(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(entry));
 719       uintx entry_bitmask = block->bitmask_for_entry(entry);
 720       assert((releasing & entry_bitmask) == 0,
 721              "Duplicate entry: " PTR_FORMAT, p2i(entry));
 722       releasing |= entry_bitmask;
 723       ++count;
 724     }
 725     // Release the contiguous entries that are in block.
 726     block->release_entries(releasing, this);
 727     Atomic::sub(count, &_allocation_count);
 728   }
 729 }
 730 
 731 const size_t initial_active_array_size = 8;
 732 
 733 OopStorage::OopStorage(const char* name,
 734                        Mutex* allocation_mutex,
 735                        Mutex* active_mutex) :
 736   _name(os::strdup(name)),
 737   _active_array(ActiveArray::create(initial_active_array_size)),
 738   _allocation_list(),
 739   _deferred_updates(NULL),
 740   _allocation_mutex(allocation_mutex),
 741   _active_mutex(active_mutex),
 742   _allocation_count(0),
 743   _concurrent_iteration_count(0),
 744   _needs_cleanup(false)
 745 {
 746   _active_array->increment_refcount();
 747   assert(_active_mutex->rank() < _allocation_mutex->rank(),


 808 const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC;
 809 
 810 void OopStorage::trigger_cleanup_if_needed() {
 811   MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
 812   if (Atomic::load(&needs_cleanup_requested) &&
 813       !needs_cleanup_triggered &&
 814       (os::javaTimeNanos() > cleanup_trigger_permit_time)) {
 815     needs_cleanup_triggered = true;
 816     ml.notify_all();
 817   }
 818 }
 819 
 820 bool OopStorage::has_cleanup_work_and_reset() {
 821   assert_lock_strong(Service_lock);
 822   cleanup_trigger_permit_time =
 823     os::javaTimeNanos() + cleanup_trigger_defer_period;
 824   needs_cleanup_triggered = false;
 825   // Set the request flag false and return its old value.
 826   // Needs to be atomic to avoid dropping a concurrent request.
 827   // Can't use Atomic::xchg, which may not support bool.
 828   return Atomic::cmpxchg(false, &needs_cleanup_requested, true);
 829 }
 830 
 831 // Record that cleanup is needed, without notifying the Service thread.
 832 // Used by release(), where we can't lock even Service_lock.
 833 void OopStorage::record_needs_cleanup() {
 834   // Set local flag first, else service thread could wake up and miss
 835   // the request.  This order may instead (rarely) unnecessarily notify.
 836   Atomic::release_store(&_needs_cleanup, true);
 837   Atomic::release_store_fence(&needs_cleanup_requested, true);
 838 }
 839 
 840 bool OopStorage::delete_empty_blocks() {
 841   // Service thread might have oopstorage work, but not for this object.
 842   // Check for deferred updates even though that's not a service thread
 843   // trigger; since we're here, we might as well process them.
 844   if (!Atomic::load_acquire(&_needs_cleanup) &&
 845       (Atomic::load_acquire(&_deferred_updates) == NULL)) {
 846     return false;
 847   }
 848 


 993 
 994 bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
 995   data->_processed += data->_segment_end - data->_segment_start;
 996   size_t start = Atomic::load_acquire(&_next_block);
 997   if (start >= _block_count) {
 998     return finish_iteration(data); // No more blocks available.
 999   }
1000   // Try to claim several at a time, but not *too* many.  We want to
1001   // avoid deciding there are many available, claiming a large
1002   // quantity, getting delayed, and then ending up with most or all
1003   // of the remaining work, leaving nothing for other threads to do.
1004   // But too small a step can lead to contention over _next_block,
1005   // esp. when the work per block is small.
1006   size_t max_step = 10;
1007   size_t remaining = _block_count - start;
1008   size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count));
1009   // Atomic::add with possible overshoot.  This can perform better
1010   // than a CAS loop on some platforms when there is contention.
1011   // We can cope with the uncertainty by recomputing start/end from
1012   // the result of the add, and dealing with potential overshoot.
1013   size_t end = Atomic::add(step, &_next_block);
1014   // _next_block may have changed, so recompute start from result of add.
1015   start = end - step;
1016   // _next_block may have changed so much that end has overshot.
1017   end = MIN2(end, _block_count);
1018   // _next_block may have changed so much that even start has overshot.
1019   if (start < _block_count) {
1020     // Record claimed segment for iteration.
1021     data->_segment_start = start;
1022     data->_segment_end = end;
1023     return true;                // Success.
1024   } else {
1025     // No more blocks to claim.
1026     return finish_iteration(data);
1027   }
1028 }
1029 
1030 bool OopStorage::BasicParState::finish_iteration(const IterationData* data) const {
1031   log_info(oopstorage, blocks, stats)
1032           ("Parallel iteration on %s: blocks = " SIZE_FORMAT
1033            ", processed = " SIZE_FORMAT " (%2.f%%)",


+++ new/src/hotspot/share/gc/shared/oopStorage.cpp

 127 }
 128 
 129 void OopStorage::ActiveArray::destroy(ActiveArray* ba) {
 130   ba->~ActiveArray();
 131   FREE_C_HEAP_ARRAY(char, ba);
 132 }
 133 
 134 size_t OopStorage::ActiveArray::size() const {
 135   return _size;
 136 }
 137 
 138 size_t OopStorage::ActiveArray::block_count() const {
 139   return _block_count;
 140 }
 141 
 142 size_t OopStorage::ActiveArray::block_count_acquire() const {
 143   return Atomic::load_acquire(&_block_count);
 144 }
 145 
 146 void OopStorage::ActiveArray::increment_refcount() const {
 147   int new_value = Atomic::add(&_refcount, 1);
 148   assert(new_value >= 1, "negative refcount %d", new_value - 1);
 149 }
 150 
 151 bool OopStorage::ActiveArray::decrement_refcount() const {
 152   int new_value = Atomic::sub(&_refcount, 1);
 153   assert(new_value >= 0, "negative refcount %d", new_value);
 154   return new_value == 0;
 155 }
 156 
 157 bool OopStorage::ActiveArray::push(Block* block) {
 158   size_t index = _block_count;
 159   if (index < _size) {
 160     block->set_active_index(index);
 161     *block_ptr(index) = block;
 162     // Use a release_store to ensure all the setup is complete before
 163     // making the block visible.
 164     Atomic::release_store(&_block_count, index + 1);
 165     return true;
 166   } else {
 167     return false;
 168   }
 169 }
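
The push() above publishes a new block by release-storing the incremented count, pairing with block_count_acquire() on the reader side so a reader never sees a count larger than the number of initialized slots. A minimal stand-alone sketch of that publish/consume pattern, using std::atomic as a stand-in for HotSpot's Atomic::release_store / Atomic::load_acquire (the PublishArray type and its names are illustrative, not from the source):

#include <atomic>
#include <cstddef>

struct PublishArray {
  static constexpr size_t _size = 8;
  int*                _entries[_size];
  std::atomic<size_t> _count{0};

  // Single writer (pushes assumed serialized elsewhere): fill the slot
  // first, then publish it by release-storing the new count.
  bool push(int* value) {
    size_t index = _count.load(std::memory_order_relaxed);
    if (index >= _size) return false;
    _entries[index] = value;                             // complete the setup
    _count.store(index + 1, std::memory_order_release);  // make it visible
    return true;
  }

  // Readers: an acquire load of the count guarantees the slots below it
  // are fully initialized.
  int* at(size_t i) const {
    size_t count = _count.load(std::memory_order_acquire);
    return (i < count) ? _entries[i] : nullptr;
  }
};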
 170 
 171 void OopStorage::ActiveArray::remove(Block* block) {
 172   assert(_block_count > 0, "array is empty");


 290 }
 291 
 292 size_t OopStorage::Block::active_index_safe(const Block* block) {
 293   STATIC_ASSERT(sizeof(intptr_t) == sizeof(block->_active_index));
 294   assert(CanUseSafeFetchN(), "precondition");
 295   return SafeFetchN((intptr_t*)&block->_active_index, 0);
 296 }
 297 
 298 unsigned OopStorage::Block::get_index(const oop* ptr) const {
 299   assert(contains(ptr), PTR_FORMAT " not in block " PTR_FORMAT, p2i(ptr), p2i(this));
 300   return static_cast<unsigned>(ptr - get_pointer(0));
 301 }
 302 
 303 oop* OopStorage::Block::allocate() {
 304   // Use CAS loop because release may change bitmask outside of lock.
 305   uintx allocated = allocated_bitmask();
 306   while (true) {
 307     assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
 308     unsigned index = count_trailing_zeros(~allocated);
 309     uintx new_value = allocated | bitmask_for_index(index);
 310     uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, allocated, new_value);
 311     if (fetched == allocated) {
 312       return get_pointer(index); // CAS succeeded; return entry for index.
 313     }
 314     allocated = fetched;       // CAS failed; retry with latest value.
 315   }
 316 }
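
allocate() above claims the lowest clear bit of the bitmask with a CAS loop because release() can clear bits concurrently, outside the allocation lock. A self-contained sketch of the same bit-claiming loop, using std::atomic and a GCC/Clang builtin in place of HotSpot's Atomic::cmpxchg and count_trailing_zeros (the function and type names are illustrative):

#include <atomic>
#include <cstdint>

typedef uint64_t bitmask_t;

// Claim the lowest clear bit of 'mask' and return its index, or -1 if all
// bits are already set.  Retries on CAS failure because other threads may
// set or clear bits concurrently.
int claim_lowest_clear_bit(std::atomic<bitmask_t>& mask) {
  bitmask_t current = mask.load();
  while (true) {
    if (~current == 0) return -1;                   // bitmask is full
    int index = __builtin_ctzll(~current);          // lowest clear bit
    bitmask_t desired = current | (bitmask_t(1) << index);
    if (mask.compare_exchange_weak(current, desired)) {
      return index;                                 // CAS succeeded; bit claimed
    }
    // CAS failed: 'current' was refreshed with the latest value; retry.
  }
}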
 317 
 318 OopStorage::Block* OopStorage::Block::new_block(const OopStorage* owner) {
 319   // _data must be first member: aligning block => aligning _data.
 320   STATIC_ASSERT(_data_pos == 0);
 321   size_t size_needed = allocation_size();
 322   void* memory = NEW_C_HEAP_ARRAY_RETURN_NULL(char, size_needed, mtGC);
 323   if (memory == NULL) {
 324     return NULL;
 325   }
 326   void* block_mem = align_up(memory, block_alignment);
 327   assert(sizeof(Block) + pointer_delta(block_mem, memory, 1) <= size_needed,
 328          "allocated insufficient space for aligned block");
 329   return ::new (block_mem) Block(owner, memory);
 330 }
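
new_block() above over-allocates raw memory so that an address aligned to block_alignment is guaranteed to fit, constructs the Block at the aligned address, and keeps the raw pointer for the eventual free. A minimal sketch of that over-allocate-then-align idea in plain C++ (malloc/free stand in for the C-heap macros; the names are illustrative):

#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Round 'p' up to the next multiple of 'alignment' (a power of two).
static void* align_up_ptr(void* p, size_t alignment) {
  uintptr_t v = reinterpret_cast<uintptr_t>(p);
  return reinterpret_cast<void*>((v + alignment - 1) & ~(uintptr_t(alignment) - 1));
}

struct AlignedChunk {
  void* _aligned;   // where the object is constructed
  void* _raw;       // what must eventually be passed to free()
};

// Over-allocate by (alignment - 1) so an aligned start always fits.
static bool allocate_aligned(size_t payload_size, size_t alignment, AlignedChunk* out) {
  size_t size_needed = payload_size + alignment - 1;
  void* raw = ::malloc(size_needed);
  if (raw == nullptr) return false;
  out->_raw = raw;
  out->_aligned = align_up_ptr(raw, alignment);
  return true;
}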


 578     LogStream ls(lt);
 579     if (is_full_bitmask(old_allocated)) {
 580       ls.print_cr("%s: block not full " PTR_FORMAT, owner->name(), p2i(block));
 581     }
 582     if (releasing == old_allocated) {
 583       ls.print_cr("%s: block empty " PTR_FORMAT, owner->name(), p2i(block));
 584     }
 585   }
 586 }
 587 
 588 void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
 589   assert(releasing != 0, "precondition");
 590   // Prevent empty block deletion when transitioning to empty.
 591   Atomic::inc(&_release_refcount);
 592 
 593   // Atomically update allocated bitmask.
 594   uintx old_allocated = _allocated_bitmask;
 595   while (true) {
 596     assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
 597     uintx new_value = old_allocated ^ releasing;
 598     uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, old_allocated, new_value);
 599     if (fetched == old_allocated) break; // Successful update.
 600     old_allocated = fetched;             // Retry with updated bitmask.
 601   }
 602 
 603   // Now that the bitmask has been updated, if we have a state transition
 604   // (updated bitmask is empty or old bitmask was full), atomically push
 605   // this block onto the deferred updates list.  Some future call to
 606   // reduce_deferred_updates will make any needed changes related to this
 607   // block and _allocation_list.  This deferral avoids _allocation_list
 608   // updates and the associated locking here.
 609   if ((releasing == old_allocated) || is_full_bitmask(old_allocated)) {
 610     // Log transitions.  Both transitions are possible in a single update.
 611     log_release_transitions(releasing, old_allocated, owner, this);
 612     // Attempt to claim responsibility for adding this block to the deferred
 613     // list, by setting the link to non-NULL by self-looping.  If this fails,
 614     // then someone else has made such a claim and the deferred update has not
 615     // yet been processed and will include our change, so we don't need to do
 616     // anything further.
 617     if (Atomic::replace_if_null(&_deferred_updates_next, this)) {
 618       // Successfully claimed.  Push, with self-loop for end-of-list.
 619       Block* head = owner->_deferred_updates;
 620       while (true) {
 621         _deferred_updates_next = (head == NULL) ? this : head;
 622         Block* fetched = Atomic::cmpxchg(&owner->_deferred_updates, head, this);
 623         if (fetched == head) break; // Successful update.
 624         head = fetched;             // Retry with updated head.
 625       }
 626       // Only request cleanup for to-empty transitions, not for from-full.
 627       // There isn't any rush to process from-full transitions.  Allocation
 628       // will reduce deferrals before allocating new blocks, so may process
 629       // some.  And the service thread will drain the entire deferred list
 630       // if there are any pending to-empty transitions.
 631       if (releasing == old_allocated) {
 632         owner->record_needs_cleanup();
 633       }
 634       log_trace(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
 635                                     owner->name(), p2i(this));
 636     }
 637   }
 638   // Release hold on empty block deletion.
 639   Atomic::dec(&_release_refcount);
 640 }
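
release_entries() above hands list maintenance off to a lock-free "deferred updates" stack. Two details carry the design: a NULL next-link means "not on the list", so a releaser first claims the block by CAS-ing the link from NULL to a self-loop (replace_if_null), and the self-loop doubles as the end-of-list marker so even the last element keeps a non-NULL link while queued. A stand-alone sketch of that push protocol using std::atomic (DeferredNode and the function name are illustrative, not HotSpot types):

#include <atomic>

struct DeferredNode {
  std::atomic<DeferredNode*> _next{nullptr};   // nullptr <=> not on the list
};

// Push 'node' onto the stack headed by 'head' unless it is already queued.
// Returns true if this call queued it, false if another thread already had;
// in the latter case the pending update will cover this change as well.
bool push_deferred(std::atomic<DeferredNode*>& head, DeferredNode* node) {
  // Claim the node: nullptr -> self-loop.
  DeferredNode* expected = nullptr;
  if (!node->_next.compare_exchange_strong(expected, node)) {
    return false;
  }
  // Ordinary lock-free stack push, except an empty list is terminated by a
  // self-loop rather than nullptr, so _next stays non-null while queued.
  DeferredNode* old_head = head.load();
  while (true) {
    node->_next.store((old_head == nullptr) ? node : old_head);
    if (head.compare_exchange_weak(old_head, node)) break;
    // 'old_head' was refreshed by the failed CAS; retry.
  }
  return true;
}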
 641 
 642 // Process one available deferred update.  Returns true if one was processed.
 643 bool OopStorage::reduce_deferred_updates() {
 644   assert_lock_strong(_allocation_mutex);
 645   // Atomically pop a block off the list, if any available.
 646   // No ABA issue because this is only called by one thread at a time.
 647   // The atomicity is with respect to pushes by release().
 648   Block* block = Atomic::load_acquire(&_deferred_updates);
 649   while (true) {
 650     if (block == NULL) return false;
 651     // Try atomic pop of block from list.
 652     Block* tail = block->deferred_updates_next();
 653     if (block == tail) tail = NULL; // Handle self-loop end marker.
 654     Block* fetched = Atomic::cmpxchg(&_deferred_updates, block, tail);
 655     if (fetched == block) break; // Update successful.
 656     block = fetched;             // Retry with updated block.
 657   }
 658   block->set_deferred_updates_next(NULL); // Clear tail after updating head.
 659   // Ensure bitmask read after pop is complete, including clearing tail, for
 660   // ordering with release().  Without this, we may be processing a stale
 661   // bitmask state here while blocking a release() operation from recording
 662   // the deferred update needed for its bitmask change.
 663   OrderAccess::fence();
 664   // Process popped block.
 665   uintx allocated = block->allocated_bitmask();
 666 
 667   // Make membership in list consistent with bitmask state.
 668   if ((_allocation_list.ctail() != NULL) &&
 669       ((_allocation_list.ctail() == block) ||
 670        (_allocation_list.next(*block) != NULL))) {
 671     // Block is in the _allocation_list.
 672     assert(!is_full_bitmask(allocated), "invariant");
 673   } else if (!is_full_bitmask(allocated)) {
 674     // Block is not in the _allocation_list, but now should be.
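
reduce_deferred_updates() above is the single consumer of that stack: it pops the head while treating a self-loop as the end marker, clears the popped node's link back to NULL so the node can be queued again, and then issues a full fence between the pop and the re-read of the bitmask, so it does not act on a stale bitmask while simultaneously preventing a concurrent release() from queuing the block for its change. A matching consumer-side sketch, continuing the DeferredNode assumptions from the push sketch above:

// Pop one node, or return nullptr if the list is empty.  Single consumer;
// concurrent push_deferred() calls are the only other writers.
DeferredNode* pop_deferred(std::atomic<DeferredNode*>& head) {
  DeferredNode* node = head.load(std::memory_order_acquire);
  while (true) {
    if (node == nullptr) return nullptr;
    DeferredNode* tail = node->_next.load();
    if (tail == node) tail = nullptr;                 // self-loop marks end of list
    if (head.compare_exchange_weak(node, tail)) break;
    // 'node' was refreshed by the failed CAS; retry.
  }
  node->_next.store(nullptr);                           // may be queued again from here on
  std::atomic_thread_fence(std::memory_order_seq_cst);  // order the pop before later reads
  return node;
}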


 707     Block* block = find_block_or_null(ptrs[i]);
 708     assert(block != NULL, "%s: invalid release " PTR_FORMAT, name(), p2i(ptrs[i]));
 709     log_trace(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptrs[i]));
 710     size_t count = 0;
 711     uintx releasing = 0;
 712     for ( ; i < size; ++i) {
 713       const oop* entry = ptrs[i];
 714       check_release_entry(entry);
 715       // If entry not in block, finish block and resume outer loop with entry.
 716       if (!block->contains(entry)) break;
 717       // Add entry to releasing bitmap.
 718       log_trace(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(entry));
 719       uintx entry_bitmask = block->bitmask_for_entry(entry);
 720       assert((releasing & entry_bitmask) == 0,
 721              "Duplicate entry: " PTR_FORMAT, p2i(entry));
 722       releasing |= entry_bitmask;
 723       ++count;
 724     }
 725     // Release the contiguous entries that are in block.
 726     block->release_entries(releasing, this);
 727     Atomic::sub(&_allocation_count, count);
 728   }
 729 }
 730 
 731 const size_t initial_active_array_size = 8;
 732 
 733 OopStorage::OopStorage(const char* name,
 734                        Mutex* allocation_mutex,
 735                        Mutex* active_mutex) :
 736   _name(os::strdup(name)),
 737   _active_array(ActiveArray::create(initial_active_array_size)),
 738   _allocation_list(),
 739   _deferred_updates(NULL),
 740   _allocation_mutex(allocation_mutex),
 741   _active_mutex(active_mutex),
 742   _allocation_count(0),
 743   _concurrent_iteration_count(0),
 744   _needs_cleanup(false)
 745 {
 746   _active_array->increment_refcount();
 747   assert(_active_mutex->rank() < _allocation_mutex->rank(),


 808 const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC;
 809 
 810 void OopStorage::trigger_cleanup_if_needed() {
 811   MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
 812   if (Atomic::load(&needs_cleanup_requested) &&
 813       !needs_cleanup_triggered &&
 814       (os::javaTimeNanos() > cleanup_trigger_permit_time)) {
 815     needs_cleanup_triggered = true;
 816     ml.notify_all();
 817   }
 818 }
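
trigger_cleanup_if_needed() above notifies the service thread at most once per request, and only after a permit time that the previous cleanup pass pushed 500 ms into the future (cleanup_trigger_defer_period). A stand-alone sketch of that rate-limited wakeup using standard C++ primitives in place of Service_lock and MonitorLocker (all names here are illustrative):

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>

// Illustrative stand-ins for Service_lock, the request flag, and the
// trigger state used above.
static std::mutex              service_lock;
static std::condition_variable service_cv;
static std::atomic<bool>       cleanup_requested{false};
static bool                    cleanup_triggered = false;
static std::chrono::steady_clock::time_point trigger_permit_time{};

// Wake the service thread at most once per request, and only after the
// defer period set by the previous cleanup pass has expired.  The service
// thread clears cleanup_triggered and advances trigger_permit_time when it
// takes the work (compare has_cleanup_work_and_reset below).
void trigger_cleanup_if_needed_sketch() {
  std::lock_guard<std::mutex> lock(service_lock);
  if (cleanup_requested.load() &&
      !cleanup_triggered &&
      std::chrono::steady_clock::now() > trigger_permit_time) {
    cleanup_triggered = true;
    service_cv.notify_all();
  }
}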
 819 
 820 bool OopStorage::has_cleanup_work_and_reset() {
 821   assert_lock_strong(Service_lock);
 822   cleanup_trigger_permit_time =
 823     os::javaTimeNanos() + cleanup_trigger_defer_period;
 824   needs_cleanup_triggered = false;
 825   // Set the request flag false and return its old value.
 826   // Needs to be atomic to avoid dropping a concurrent request.
 827   // Can't use Atomic::xchg, which may not support bool.
 828   return Atomic::cmpxchg(&needs_cleanup_requested, true, false);
 829 }
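
The reset above uses cmpxchg rather than xchg because Atomic::xchg may not support bool; the compare-exchange both clears the flag and yields its previous value in one atomic step. A tiny stand-alone equivalent with std::atomic<bool> (where exchange(false) would also do; the compare-exchange form is kept to mirror the code above):

#include <atomic>

// Set 'flag' to false and report whether it was set beforehand.
bool test_and_clear(std::atomic<bool>& flag) {
  bool observed = true;
  // Succeeds (installing false) only if the flag was true; on failure,
  // 'observed' is overwritten with the value the flag actually held,
  // so it ends up holding the previous value either way.
  flag.compare_exchange_strong(observed, false);
  return observed;
}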
 830 
 831 // Record that cleanup is needed, without notifying the Service thread.
 832 // Used by release(), where we can't lock even Service_lock.
 833 void OopStorage::record_needs_cleanup() {
 834   // Set local flag first, else service thread could wake up and miss
 835   // the request.  This order may instead (rarely) unnecessarily notify.
 836   Atomic::release_store(&_needs_cleanup, true);
 837   Atomic::release_store_fence(&needs_cleanup_requested, true);
 838 }
 839 
 840 bool OopStorage::delete_empty_blocks() {
 841   // Service thread might have oopstorage work, but not for this object.
 842   // Check for deferred updates even though that's not a service thread
 843   // trigger; since we're here, we might as well process them.
 844   if (!Atomic::load_acquire(&_needs_cleanup) &&
 845       (Atomic::load_acquire(&_deferred_updates) == NULL)) {
 846     return false;
 847   }
 848 


 993 
 994 bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
 995   data->_processed += data->_segment_end - data->_segment_start;
 996   size_t start = Atomic::load_acquire(&_next_block);
 997   if (start >= _block_count) {
 998     return finish_iteration(data); // No more blocks available.
 999   }
1000   // Try to claim several at a time, but not *too* many.  We want to
1001   // avoid deciding there are many available, claiming a large
1002   // quantity, getting delayed, and then ending up with most or all
1003   // of the remaining work, leaving nothing for other threads to do.
1004   // But too small a step can lead to contention over _next_block,
1005   // esp. when the work per block is small.
1006   size_t max_step = 10;
1007   size_t remaining = _block_count - start;
1008   size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count));
1009   // Atomic::add with possible overshoot.  This can perform better
1010   // than a CAS loop on some platforms when there is contention.
1011   // We can cope with the uncertainty by recomputing start/end from
1012   // the result of the add, and dealing with potential overshoot.
1013   size_t end = Atomic::add(&_next_block, step);
1014   // _next_block may have changed, so recompute start from result of add.
1015   start = end - step;
1016   // _next_block may have changed so much that end has overshot.
1017   end = MIN2(end, _block_count);
1018   // _next_block may have changed so much that even start has overshot.
1019   if (start < _block_count) {
1020     // Record claimed segment for iteration.
1021     data->_segment_start = start;
1022     data->_segment_end = end;
1023     return true;                // Success.
1024   } else {
1025     // No more blocks to claim.
1026     return finish_iteration(data);
1027   }
1028 }
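
claim_next_segment() above claims a small batch of blocks with a single Atomic::add instead of a CAS loop; the add may push _next_block past _block_count, so the claimed range is recomputed from the add's result and clamped. A stand-alone sketch of the same claim-by-add idea using std::atomic (note that fetch_add returns the old value, while HotSpot's Atomic::add returns the new one; the type and field names are illustrative):

#include <algorithm>
#include <atomic>
#include <cstddef>

struct SegmentClaimer {
  std::atomic<size_t> _next_block{0};
  size_t              _block_count;
  size_t              _estimated_thread_count;   // assumed >= 1

  // Claim [*start_out, *end_out); returns false when everything has been
  // handed out.  The fetch_add may overshoot _block_count; the overshoot
  // is clamped away afterwards instead of being prevented up front.
  bool claim(size_t* start_out, size_t* end_out) {
    const size_t max_step = 10;
    size_t start = _next_block.load(std::memory_order_acquire);
    if (start >= _block_count) return false;
    size_t remaining = _block_count - start;
    size_t step = std::min(max_step, 1 + (remaining / _estimated_thread_count));
    size_t end = _next_block.fetch_add(step) + step;   // fetch_add returns the old value
    start = end - step;                                // recompute from the add's result
    end = std::min(end, _block_count);                 // clamp any overshoot of the end
    if (start >= _block_count) return false;           // even the start overshot
    *start_out = start;
    *end_out = end;
    return true;
  }
};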
1029 
1030 bool OopStorage::BasicParState::finish_iteration(const IterationData* data) const {
1031   log_info(oopstorage, blocks, stats)
1032           ("Parallel iteration on %s: blocks = " SIZE_FORMAT
1033            ", processed = " SIZE_FORMAT " (%2.f%%)",

