
src/hotspot/share/gc/shared/oopStorage.cpp

 290 }
 291 
 292 size_t OopStorage::Block::active_index_safe(const Block* block) {
 293   STATIC_ASSERT(sizeof(intptr_t) == sizeof(block->_active_index));
 294   assert(CanUseSafeFetchN(), "precondition");
 295   return SafeFetchN((intptr_t*)&block->_active_index, 0);
 296 }
 297 
 298 unsigned OopStorage::Block::get_index(const oop* ptr) const {
 299   assert(contains(ptr), PTR_FORMAT " not in block " PTR_FORMAT, p2i(ptr), p2i(this));
 300   return static_cast<unsigned>(ptr - get_pointer(0));
 301 }
 302 
 303 oop* OopStorage::Block::allocate() {
 304   // Use CAS loop because release may change bitmask outside of lock.
 305   uintx allocated = allocated_bitmask();
 306   while (true) {
 307     assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
 308     unsigned index = count_trailing_zeros(~allocated);
 309     uintx new_value = allocated | bitmask_for_index(index);
 310     uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, allocated);
 311     if (fetched == allocated) {
 312       return get_pointer(index); // CAS succeeded; return entry for index.
 313     }
 314     allocated = fetched;       // CAS failed; retry with latest value.
 315   }
 316 }
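
Note on the allocation path: allocate() claims the lowest clear bit of the allocated bitmask with a CAS retry loop, because concurrent release() calls may change the bitmask without holding the allocation lock. A minimal standalone sketch of the same claim-a-bit pattern, written against std::atomic (C++20) rather than HotSpot's Atomic wrapper; the function name and types here are illustrative only:

    #include <atomic>
    #include <bit>
    #include <cstdint>

    // Hypothetical stand-in for Block::allocate(): claim the lowest clear bit
    // of 'allocated' and return its index, or -1 if every bit is already set.
    static int claim_lowest_clear_bit(std::atomic<uint64_t>& allocated) {
      uint64_t current = allocated.load(std::memory_order_relaxed);
      while (true) {
        if (~current == 0) return -1;                 // bitmask is full
        int index = std::countr_zero(~current);       // lowest clear bit
        uint64_t desired = current | (uint64_t(1) << index);
        // On failure compare_exchange_weak refreshes 'current' with the latest
        // value, mirroring the "retry with latest value" step above.
        if (allocated.compare_exchange_weak(current, desired,
                                            std::memory_order_acq_rel)) {
          return index;                               // CAS succeeded; bit claimed
        }
      }
    }
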
 317 
 318 OopStorage::Block* OopStorage::Block::new_block(const OopStorage* owner) {
 319   // _data must be first member: aligning block => aligning _data.
 320   STATIC_ASSERT(_data_pos == 0);
 321   size_t size_needed = allocation_size();
 322   void* memory = NEW_C_HEAP_ARRAY_RETURN_NULL(char, size_needed, mtGC);
 323   if (memory == NULL) {
 324     return NULL;
 325   }
 326   void* block_mem = align_up(memory, block_alignment);
 327   assert(sizeof(Block) + pointer_delta(block_mem, memory, 1) <= size_needed,
 328          "allocated insufficient space for aligned block");
 329   return ::new (block_mem) Block(owner, memory);
 330 }
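
new_block() over-allocates raw C-heap memory, aligns the block start within it, and constructs the Block there with placement new, keeping the raw pointer so the allocation can be freed later. A self-contained sketch of that over-allocate / align / placement-new pattern; plain malloc and the ToyBlock type stand in for NEW_C_HEAP_ARRAY_RETURN_NULL and the real Block, and the alignment is assumed to be a power of two:

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>
    #include <new>

    struct ToyBlock {                       // stand-in for OopStorage::Block
      void* _memory;                        // raw allocation, kept for freeing
      explicit ToyBlock(void* memory) : _memory(memory) {}
    };

    // 'alignment' must be a power of two.
    static ToyBlock* new_toy_block(std::size_t alignment) {
      // Over-allocate so an aligned ToyBlock is guaranteed to fit.
      std::size_t size_needed = sizeof(ToyBlock) + alignment - 1;
      void* memory = std::malloc(size_needed);
      if (memory == nullptr) return nullptr;
      // Round the start address up to the requested alignment (cf. align_up).
      std::uintptr_t raw = reinterpret_cast<std::uintptr_t>(memory);
      std::uintptr_t aligned = (raw + alignment - 1) & ~(std::uintptr_t(alignment) - 1);
      void* block_mem = reinterpret_cast<void*>(aligned);
      // Construct in place; the raw pointer is remembered for delete_toy_block.
      return ::new (block_mem) ToyBlock(memory);
    }

    static void delete_toy_block(ToyBlock* block) {
      void* memory = block->_memory;
      block->~ToyBlock();
      std::free(memory);
    }
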


 578     LogStream ls(lt);
 579     if (is_full_bitmask(old_allocated)) {
 580       ls.print_cr("%s: block not full " PTR_FORMAT, owner->name(), p2i(block));
 581     }
 582     if (releasing == old_allocated) {
 583       ls.print_cr("%s: block empty " PTR_FORMAT, owner->name(), p2i(block));
 584     }
 585   }
 586 }
 587 
 588 void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
 589   assert(releasing != 0, "precondition");
 590   // Prevent empty block deletion when transitioning to empty.
 591   Atomic::inc(&_release_refcount);
 592 
 593   // Atomically update allocated bitmask.
 594   uintx old_allocated = _allocated_bitmask;
 595   while (true) {
 596     assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
 597     uintx new_value = old_allocated ^ releasing;
 598     uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, old_allocated);
 599     if (fetched == old_allocated) break; // Successful update.
 600     old_allocated = fetched;             // Retry with updated bitmask.
 601   }
 602 
 603   // Now that the bitmask has been updated, if we have a state transition
 604   // (updated bitmask is empty or old bitmask was full), atomically push
 605   // this block onto the deferred updates list.  Some future call to
 606   // reduce_deferred_updates will make any needed changes related to this
 607   // block and _allocation_list.  This deferral avoids _allocation_list
 608   // updates and the associated locking here.
 609   if ((releasing == old_allocated) || is_full_bitmask(old_allocated)) {
 610     // Log transitions.  Both transitions are possible in a single update.
 611     log_release_transitions(releasing, old_allocated, owner, this);
 612     // Attempt to claim responsibility for adding this block to the deferred
 613     // list, by setting the link to non-NULL by self-looping.  If this fails,
 614     // then someone else has made such a claim and the deferred update has not
 615     // yet been processed and will include our change, so we don't need to do
 616     // anything further.
 617     if (Atomic::replace_if_null(this, &_deferred_updates_next)) {
 618       // Successfully claimed.  Push, with self-loop for end-of-list.
 619       Block* head = owner->_deferred_updates;
 620       while (true) {
 621         _deferred_updates_next = (head == NULL) ? this : head;
 622         Block* fetched = Atomic::cmpxchg(this, &owner->_deferred_updates, head);
 623         if (fetched == head) break; // Successful update.
 624         head = fetched;             // Retry with updated head.
 625       }
 626       // Only request cleanup for to-empty transitions, not for from-full.
 627       // There isn't any rush to process from-full transitions.  Allocation
 628       // will reduce deferrals before allocating new blocks, so may process
 629       // some.  And the service thread will drain the entire deferred list
 630       // if there are any pending to-empty transitions.
 631       if (releasing == old_allocated) {
 632         owner->record_needs_cleanup();
 633       }
 634       log_trace(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
 635                                     owner->name(), p2i(this));
 636     }
 637   }
 638   // Release hold on empty block deletion.
 639   Atomic::dec(&_release_refcount);
 640 }
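
The deferred-update hand-off above combines two lock-free steps: a claim (Atomic::replace_if_null swings the block's next link from NULL to a self-loop, so concurrent releasers of other entries in the same block do not push it twice) and a push onto the owner's list, where the self-loop doubles as the end-of-list marker so NULL can keep meaning "not on the list". A standalone sketch of both steps with std::atomic; Node and the function names are made up for illustration:

    #include <atomic>

    struct Node {
      // nullptr means "not on the deferred list"; a self-loop marks both a
      // claimed node and the end of the list.
      std::atomic<Node*> next{nullptr};
    };

    // Claim step (cf. Atomic::replace_if_null on _deferred_updates_next): only
    // the thread that swings 'next' from nullptr to the node itself proceeds
    // to push; everyone else's bitmask change is already covered by that push.
    static bool try_claim(Node* node) {
      Node* expected = nullptr;
      return node->next.compare_exchange_strong(expected, node,
                                                std::memory_order_acq_rel);
    }

    // Push step (cf. the cmpxchg loop on owner->_deferred_updates): link the
    // node ahead of the current head, keeping a self-loop for the last node.
    static void push(std::atomic<Node*>& head, Node* node) {
      Node* old_head = head.load(std::memory_order_relaxed);
      while (true) {
        node->next.store((old_head == nullptr) ? node : old_head,
                         std::memory_order_relaxed);
        // Release so the link written above is visible once 'node' is the head.
        if (head.compare_exchange_weak(old_head, node,
                                       std::memory_order_release)) {
          return;
        }
        // 'old_head' was refreshed by the failed CAS; retry with it.
      }
    }

    // Release-side usage, mirroring release_entries():
    //   if (try_claim(block)) push(deferred_head, block);
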
 641 
 642 // Process one available deferred update.  Returns true if one was processed.
 643 bool OopStorage::reduce_deferred_updates() {
 644   assert_lock_strong(_allocation_mutex);
 645   // Atomically pop a block off the list, if any available.
 646   // No ABA issue because this is only called by one thread at a time.
 647   // The atomicity is w.r.t. pushes by release().
 648   Block* block = Atomic::load_acquire(&_deferred_updates);
 649   while (true) {
 650     if (block == NULL) return false;
 651     // Try atomic pop of block from list.
 652     Block* tail = block->deferred_updates_next();
 653     if (block == tail) tail = NULL; // Handle self-loop end marker.
 654     Block* fetched = Atomic::cmpxchg(tail, &_deferred_updates, block);
 655     if (fetched == block) break; // Update successful.
 656     block = fetched;             // Retry with updated block.
 657   }
 658   block->set_deferred_updates_next(NULL); // Clear tail after updating head.
 659   // Ensure bitmask read after pop is complete, including clearing tail, for
 660   // ordering with release().  Without this, we may be processing a stale
 661   // bitmask state here while blocking a release() operation from recording
 662   // the deferred update needed for its bitmask change.
 663   OrderAccess::fence();
 664   // Process popped block.
 665   uintx allocated = block->allocated_bitmask();
 666 
 667   // Make membership in list consistent with bitmask state.
 668   if ((_allocation_list.ctail() != NULL) &&
 669       ((_allocation_list.ctail() == block) ||
 670        (_allocation_list.next(*block) != NULL))) {
 671     // Block is in the _allocation_list.
 672     assert(!is_full_bitmask(allocated), "invariant");
 673   } else if (!is_full_bitmask(allocated)) {
 674     // Block is not in the _allocation_list, but now should be.
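
reduce_deferred_updates() is the single-consumer side of that list: it pops one block, clears the self-loop so the block can be claimed again, and then issues a full fence before reading the bitmask, so it cannot act on stale state while a concurrent release() is deciding whether it still needs to re-register the block. A standalone sketch of the pop-and-fence side, pairing with the push sketch above (again with illustrative names, not HotSpot types):

    #include <atomic>

    struct Node {
      std::atomic<Node*> next{nullptr};
    };

    // Single-consumer pop from a list whose last node points to itself instead
    // of nullptr, so that next == nullptr can keep meaning "not on the list".
    // Returns nullptr if the list is empty.
    static Node* pop_deferred(std::atomic<Node*>& head) {
      Node* node = head.load(std::memory_order_acquire);
      while (node != nullptr) {
        Node* next = node->next.load(std::memory_order_relaxed);
        if (next == node) next = nullptr;             // self-loop marks the end
        if (head.compare_exchange_weak(node, next,
                                       std::memory_order_acq_rel)) {
          // Clear the link so the node can be claimed for a later push, then
          // fence so state read afterwards is not ordered ahead of that clear
          // (cf. the OrderAccess::fence() above).
          node->next.store(nullptr, std::memory_order_relaxed);
          std::atomic_thread_fence(std::memory_order_seq_cst);
          return node;
        }
        // 'node' was refreshed by the failed CAS; retry.
      }
      return nullptr;
    }
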


 808 const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC;
 809 
 810 void OopStorage::trigger_cleanup_if_needed() {
 811   MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
 812   if (Atomic::load(&needs_cleanup_requested) &&
 813       !needs_cleanup_triggered &&
 814       (os::javaTimeNanos() > cleanup_trigger_permit_time)) {
 815     needs_cleanup_triggered = true;
 816     ml.notify_all();
 817   }
 818 }
 819 
 820 bool OopStorage::has_cleanup_work_and_reset() {
 821   assert_lock_strong(Service_lock);
 822   cleanup_trigger_permit_time =
 823     os::javaTimeNanos() + cleanup_trigger_defer_period;
 824   needs_cleanup_triggered = false;
 825   // Set the request flag false and return its old value.
 826   // Needs to be atomic to avoid dropping a concurrent request.
 827   // Can't use Atomic::xchg, which may not support bool.
 828   return Atomic::cmpxchg(false, &needs_cleanup_requested, true);
 829 }
 830 
 831 // Record that cleanup is needed, without notifying the Service thread.
 832 // Used by release(), where we can't lock even Service_lock.
 833 void OopStorage::record_needs_cleanup() {
 834   // Set local flag first, else service thread could wake up and miss
 835   // the request.  This order may instead (rarely) unnecessarily notify.
 836   Atomic::release_store(&_needs_cleanup, true);
 837   Atomic::release_store_fence(&needs_cleanup_requested, true);
 838 }
 839 
 840 bool OopStorage::delete_empty_blocks() {
 841   // Service thread might have oopstorage work, but not for this object.
 842   // Check for deferred updates even though that's not a service thread
 843   // trigger; since we're here, we might as well process them.
 844   if (!Atomic::load_acquire(&_needs_cleanup) &&
 845       (Atomic::load_acquire(&_deferred_updates) == NULL)) {
 846     return false;
 847   }
 848 

Updated version of the same hunks (the Atomic::cmpxchg and Atomic::replace_if_null calls now pass the destination first):

 290 }
 291 
 292 size_t OopStorage::Block::active_index_safe(const Block* block) {
 293   STATIC_ASSERT(sizeof(intptr_t) == sizeof(block->_active_index));
 294   assert(CanUseSafeFetchN(), "precondition");
 295   return SafeFetchN((intptr_t*)&block->_active_index, 0);
 296 }
 297 
 298 unsigned OopStorage::Block::get_index(const oop* ptr) const {
 299   assert(contains(ptr), PTR_FORMAT " not in block " PTR_FORMAT, p2i(ptr), p2i(this));
 300   return static_cast<unsigned>(ptr - get_pointer(0));
 301 }
 302 
 303 oop* OopStorage::Block::allocate() {
 304   // Use CAS loop because release may change bitmask outside of lock.
 305   uintx allocated = allocated_bitmask();
 306   while (true) {
 307     assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
 308     unsigned index = count_trailing_zeros(~allocated);
 309     uintx new_value = allocated | bitmask_for_index(index);
 310     uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, allocated, new_value);
 311     if (fetched == allocated) {
 312       return get_pointer(index); // CAS succeeded; return entry for index.
 313     }
 314     allocated = fetched;       // CAS failed; retry with latest value.
 315   }
 316 }
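
The only difference in this version of allocate() is the Atomic::cmpxchg call itself: the destination now comes first, followed by the compare value and then the new value, with the old value still returned so fetched == allocated signals success. The toy wrapper below illustrates that (dest, compare_value, exchange_value) calling convention on top of std::atomic; it is a sketch of the argument order only, not HotSpot's actual Atomic implementation:

    #include <atomic>

    // Toy illustration of the reordered call: arguments are
    // (destination, compare_value, exchange_value), and the previous value is
    // returned, so result == compare_value means the CAS succeeded.
    template <typename T>
    T toy_cmpxchg(std::atomic<T>* dest, T compare_value, T exchange_value) {
      T expected = compare_value;
      dest->compare_exchange_strong(expected, exchange_value,
                                    std::memory_order_seq_cst);
      return expected;   // old value, whether or not the exchange happened
    }

    // Usage mirroring the loop in Block::allocate():
    //   fetched = toy_cmpxchg(&allocated_bitmask, allocated, new_value);
    //   if (fetched == allocated) { /* success */ } else { allocated = fetched; }
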
 317 
 318 OopStorage::Block* OopStorage::Block::new_block(const OopStorage* owner) {
 319   // _data must be first member: aligning block => aligning _data.
 320   STATIC_ASSERT(_data_pos == 0);
 321   size_t size_needed = allocation_size();
 322   void* memory = NEW_C_HEAP_ARRAY_RETURN_NULL(char, size_needed, mtGC);
 323   if (memory == NULL) {
 324     return NULL;
 325   }
 326   void* block_mem = align_up(memory, block_alignment);
 327   assert(sizeof(Block) + pointer_delta(block_mem, memory, 1) <= size_needed,
 328          "allocated insufficient space for aligned block");
 329   return ::new (block_mem) Block(owner, memory);
 330 }


 578     LogStream ls(lt);
 579     if (is_full_bitmask(old_allocated)) {
 580       ls.print_cr("%s: block not full " PTR_FORMAT, owner->name(), p2i(block));
 581     }
 582     if (releasing == old_allocated) {
 583       ls.print_cr("%s: block empty " PTR_FORMAT, owner->name(), p2i(block));
 584     }
 585   }
 586 }
 587 
 588 void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
 589   assert(releasing != 0, "precondition");
 590   // Prevent empty block deletion when transitioning to empty.
 591   Atomic::inc(&_release_refcount);
 592 
 593   // Atomically update allocated bitmask.
 594   uintx old_allocated = _allocated_bitmask;
 595   while (true) {
 596     assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
 597     uintx new_value = old_allocated ^ releasing;
 598     uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, old_allocated, new_value);
 599     if (fetched == old_allocated) break; // Successful update.
 600     old_allocated = fetched;             // Retry with updated bitmask.
 601   }
 602 
 603   // Now that the bitmask has been updated, if we have a state transition
 604   // (updated bitmask is empty or old bitmask was full), atomically push
 605   // this block onto the deferred updates list.  Some future call to
 606   // reduce_deferred_updates will make any needed changes related to this
 607   // block and _allocation_list.  This deferral avoids _allocation_list
 608   // updates and the associated locking here.
 609   if ((releasing == old_allocated) || is_full_bitmask(old_allocated)) {
 610     // Log transitions.  Both transitions are possible in a single update.
 611     log_release_transitions(releasing, old_allocated, owner, this);
 612     // Attempt to claim responsibility for adding this block to the deferred
 613     // list, by setting the link to non-NULL by self-looping.  If this fails,
 614     // then someone else has made such a claim and the deferred update has not
 615     // yet been processed and will include our change, so we don't need to do
 616     // anything further.
 617     if (Atomic::replace_if_null(&_deferred_updates_next, this)) {
 618       // Successfully claimed.  Push, with self-loop for end-of-list.
 619       Block* head = owner->_deferred_updates;
 620       while (true) {
 621         _deferred_updates_next = (head == NULL) ? this : head;
 622         Block* fetched = Atomic::cmpxchg(&owner->_deferred_updates, head, this);
 623         if (fetched == head) break; // Successful update.
 624         head = fetched;             // Retry with updated head.
 625       }
 626       // Only request cleanup for to-empty transitions, not for from-full.
 627       // There isn't any rush to process from-full transitions.  Allocation
 628       // will reduce deferrals before allocating new blocks, so may process
 629       // some.  And the service thread will drain the entire deferred list
 630       // if there are any pending to-empty transitions.
 631       if (releasing == old_allocated) {
 632         owner->record_needs_cleanup();
 633       }
 634       log_trace(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
 635                                     owner->name(), p2i(this));
 636     }
 637   }
 638   // Release hold on empty block deletion.
 639   Atomic::dec(&_release_refcount);
 640 }
 641 
 642 // Process one available deferred update.  Returns true if one was processed.
 643 bool OopStorage::reduce_deferred_updates() {
 644   assert_lock_strong(_allocation_mutex);
 645   // Atomically pop a block off the list, if any available.
 646   // No ABA issue because this is only called by one thread at a time.
 647   // The atomicity is w.r.t. pushes by release().
 648   Block* block = Atomic::load_acquire(&_deferred_updates);
 649   while (true) {
 650     if (block == NULL) return false;
 651     // Try atomic pop of block from list.
 652     Block* tail = block->deferred_updates_next();
 653     if (block == tail) tail = NULL; // Handle self-loop end marker.
 654     Block* fetched = Atomic::cmpxchg(&_deferred_updates, block, tail);
 655     if (fetched == block) break; // Update successful.
 656     block = fetched;             // Retry with updated block.
 657   }
 658   block->set_deferred_updates_next(NULL); // Clear tail after updating head.
 659   // Ensure bitmask read after pop is complete, including clearing tail, for
 660   // ordering with release().  Without this, we may be processing a stale
 661   // bitmask state here while blocking a release() operation from recording
 662   // the deferred update needed for its bitmask change.
 663   OrderAccess::fence();
 664   // Process popped block.
 665   uintx allocated = block->allocated_bitmask();
 666 
 667   // Make membership in list consistent with bitmask state.
 668   if ((_allocation_list.ctail() != NULL) &&
 669       ((_allocation_list.ctail() == block) ||
 670        (_allocation_list.next(*block) != NULL))) {
 671     // Block is in the _allocation_list.
 672     assert(!is_full_bitmask(allocated), "invariant");
 673   } else if (!is_full_bitmask(allocated)) {
 674     // Block is not in the _allocation_list, but now should be.


 808 const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC;
 809 
 810 void OopStorage::trigger_cleanup_if_needed() {
 811   MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
 812   if (Atomic::load(&needs_cleanup_requested) &&
 813       !needs_cleanup_triggered &&
 814       (os::javaTimeNanos() > cleanup_trigger_permit_time)) {
 815     needs_cleanup_triggered = true;
 816     ml.notify_all();
 817   }
 818 }
 819 
 820 bool OopStorage::has_cleanup_work_and_reset() {
 821   assert_lock_strong(Service_lock);
 822   cleanup_trigger_permit_time =
 823     os::javaTimeNanos() + cleanup_trigger_defer_period;
 824   needs_cleanup_triggered = false;
 825   // Set the request flag false and return its old value.
 826   // Needs to be atomic to avoid dropping a concurrent request.
 827   // Can't use Atomic::xchg, which may not support bool.
 828   return Atomic::cmpxchg(&needs_cleanup_requested, true, false);
 829 }
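
The cmpxchg above stands in for a boolean exchange: it only ever writes false when the flag was true, and it returns the old value, so a request that arrives concurrently is either consumed by this call or left set for the next pass. A standalone equivalent using std::atomic<bool> (where a plain exchange is also available); the variable and function names are illustrative:

    #include <atomic>

    static std::atomic<bool> needs_cleanup_requested{false};   // illustrative

    // Clear the request flag and report whether it was set, without losing a
    // request that lands between the read and the write.
    static bool test_and_clear_request() {
      // std::atomic<bool> could simply use exchange(false); the cmpxchg form
      // below matches the code above: only true -> false is attempted, and the
      // old value says whether there was a request to consume.
      bool expected = true;
      needs_cleanup_requested.compare_exchange_strong(expected, false,
                                                      std::memory_order_acq_rel);
      return expected;   // true iff a request was pending (and is now cleared)
    }
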
 830 
 831 // Record that cleanup is needed, without notifying the Service thread.
 832 // Used by release(), where we can't lock even Service_lock.
 833 void OopStorage::record_needs_cleanup() {
 834   // Set local flag first, else service thread could wake up and miss
 835   // the request.  This order may instead (rarely) unnecessarily notify.
 836   Atomic::release_store(&_needs_cleanup, true);
 837   Atomic::release_store_fence(&needs_cleanup_requested, true);
 838 }
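
record_needs_cleanup() publishes two flags in a deliberate order: the per-storage _needs_cleanup first, then the global request flag, finishing with a fence. A service thread that observes the request with an acquire load is therefore guaranteed to also observe this object's flag, so the wakeup cannot outrun the work it is meant to find. A compact sketch of that publish/observe pairing with std::atomic; the names below are stand-ins, not the real fields:

    #include <atomic>

    static std::atomic<bool> object_needs_cleanup{false};   // like _needs_cleanup
    static std::atomic<bool> cleanup_requested{false};      // like needs_cleanup_requested

    // Publisher (cf. record_needs_cleanup): per-object flag first, global
    // request second, then a full fence (mirroring release_store_fence).
    static void publish_cleanup_request() {
      object_needs_cleanup.store(true, std::memory_order_release);
      cleanup_requested.store(true, std::memory_order_release);
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }

    // Consumer (cf. the service thread): once it observes the global request,
    // the acquire load guarantees it also sees the per-object flag set above.
    static bool observe_cleanup_request() {
      if (!cleanup_requested.load(std::memory_order_acquire)) return false;
      return object_needs_cleanup.load(std::memory_order_acquire);
    }
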
 839 
 840 bool OopStorage::delete_empty_blocks() {
 841   // Service thread might have oopstorage work, but not for this object.
 842   // Check for deferred updates even though that's not a service thread
 843   // trigger; since we're here, we might as well process them.
 844   if (!Atomic::load_acquire(&_needs_cleanup) &&
 845       (Atomic::load_acquire(&_deferred_updates) == NULL)) {
 846     return false;
 847   }
 848 

