
src/hotspot/share/gc/shared/oopStorage.cpp

rev 50278 : [mq]: fix


  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/oopStorage.inline.hpp"
  27 #include "gc/shared/oopStorageParState.inline.hpp"
  28 #include "logging/log.hpp"
  29 #include "logging/logStream.hpp"
  30 #include "memory/allocation.inline.hpp"
  31 #include "runtime/atomic.hpp"
  32 #include "runtime/globals.hpp"
  33 #include "runtime/handles.inline.hpp"
  34 #include "runtime/mutex.hpp"
  35 #include "runtime/mutexLocker.hpp"
  36 #include "runtime/orderAccess.inline.hpp"
  37 #include "runtime/safepoint.hpp"
  38 #include "runtime/stubRoutines.hpp"
  39 #include "runtime/thread.hpp"
  40 #include "utilities/align.hpp"
  41 #include "utilities/count_trailing_zeros.hpp"
  42 #include "utilities/debug.hpp"

  43 #include "utilities/globalDefinitions.hpp"
  44 #include "utilities/macros.hpp"
  45 #include "utilities/ostream.hpp"
  46 #include "utilities/spinYield.hpp"
  47 
  48 OopStorage::AllocateEntry::AllocateEntry() : _prev(NULL), _next(NULL) {}
  49 
  50 OopStorage::AllocateEntry::~AllocateEntry() {
  51   assert(_prev == NULL, "deleting attached block");
  52   assert(_next == NULL, "deleting attached block");
  53 }
  54 
  55 OopStorage::AllocateList::AllocateList(const AllocateEntry& (*get_entry)(const Block& block)) :
  56   _head(NULL), _tail(NULL), _get_entry(get_entry)
  57 {}
  58 
  59 OopStorage::AllocateList::~AllocateList() {
  60   // ~OopStorage() empties its lists before destroying them.
  61   assert(_head == NULL, "deleting non-empty block list");
  62   assert(_tail == NULL, "deleting non-empty block list");


 484 }
 485 
 486 // Create a new, larger active array with the same content as the
 487 // current array, then replace it, relinquishing the old array.
 488 // Return true if the array was successfully expanded, false to
 489 // indicate allocation failure.
 490 bool OopStorage::expand_active_array() {
 491   assert_lock_strong(_allocate_mutex);
 492   ActiveArray* old_array = _active_array;
 493   size_t new_size = 2 * old_array->size();
 494   log_info(oopstorage, blocks)("%s: expand active array " SIZE_FORMAT,
 495                                name(), new_size);
 496   ActiveArray* new_array = ActiveArray::create(new_size, AllocFailStrategy::RETURN_NULL);
 497   if (new_array == NULL) return false;
 498   new_array->copy_from(old_array);
 499   replace_active_array(new_array);
 500   relinquish_block_array(old_array);
 501   return true;
 502 }
 503 
 504 OopStorage::ProtectActive::ProtectActive() : _enter(0), _exit() {}
 505 
 506 // Begin read-side critical section.
 507 uint OopStorage::ProtectActive::read_enter() {
 508   return Atomic::add(2u, &_enter);
 509 }
 510 
 511 // End read-side critical section.
 512 void OopStorage::ProtectActive::read_exit(uint enter_value) {
 513   Atomic::add(2u, &_exit[enter_value & 1]);
 514 }
 515 
 516 // Wait until all readers that entered the critical section before
 517 // synchronization have exited that critical section.
 518 void OopStorage::ProtectActive::write_synchronize() {
 519   SpinYield spinner;
 520   // Determine old and new exit counters, based on bit0 of the
 521   // on-entry _enter counter.
 522   uint value = OrderAccess::load_acquire(&_enter);
 523   volatile uint* new_ptr = &_exit[(value + 1) & 1];
 524   // Atomically change the in-use exit counter to the new counter, by
 525   // adding 1 to the _enter counter (flipping bit0 between 0 and 1)
 526   // and initializing the new exit counter to that enter value.  Note:
 527   // The new exit counter is not being used by read operations until
 528   // this change succeeds.
 529   uint old;
 530   do {
 531     old = value;
 532     *new_ptr = ++value;
 533     value = Atomic::cmpxchg(value, &_enter, old);
 534   } while (old != value);
 535   // Readers that entered the critical section before we changed the
 536   // selected exit counter will use the old exit counter.  Readers
 537   // entering after the change will use the new exit counter.  Wait
 538   // for all the critical sections started before the change to
 539 // complete, i.e. for the value of *old_ptr to catch up with old.
 540   volatile uint* old_ptr = &_exit[old & 1];
 541   while (old != OrderAccess::load_acquire(old_ptr)) {
 542     spinner.wait();
 543   }
 544 }
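
A side note on the protocol above: the same enter/exit-counter scheme can be modeled outside HotSpot. The following is a minimal, self-contained sketch in portable C++ (std::atomic with its default seq_cst ordering standing in for Atomic/OrderAccess; the type and member names are illustrative only, not HotSpot code). It shows why flipping bit0 of the enter counter lets the writer wait only for readers that entered before the flip:

#include <atomic>
#include <thread>

// Illustrative model of the ProtectActive counter protocol.
struct ProtectActiveModel {
  std::atomic<unsigned> _enter{0};
  std::atomic<unsigned> _exit[2] = {0, 0};

  // Readers bump counters by 2, so bit0 stays free as a phase selector.
  unsigned read_enter() { return _enter.fetch_add(2) + 2; }
  void read_exit(unsigned enter_value) { _exit[enter_value & 1].fetch_add(2); }

  void write_synchronize() {
    unsigned value = _enter.load();
    unsigned old;
    do {
      old = value;
      // Seed the not-in-use exit counter with the post-flip enter value.
      _exit[(old + 1) & 1].store(old + 1);
      // Flip bit0 of _enter; retry if a reader raced in (parity unchanged).
    } while (!_enter.compare_exchange_weak(value, old + 1));
    // Each pre-flip reader added 2 to _enter on entry and adds 2 to
    // _exit[old & 1] on exit, so that counter reaches 'old' exactly when
    // the last of them has left.
    while (_exit[old & 1].load() != old) {
      std::this_thread::yield();
    }
  }
};

Readers pair read_enter() with read_exit(); the writer publishes its change first and then calls write_synchronize(), as replace_active_array does below.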
 545 
 546 // Make new_array the _active_array.  Increments new_array's refcount
 547 // to account for the new reference.  The assignment is atomic with respect to
 548 // obtain_active_array; once this function returns, it is safe for the
 549 // caller to relinquish the old array.
 550 void OopStorage::replace_active_array(ActiveArray* new_array) {
 551   // Caller has the old array that is the current value of _active_array.
 552   // Update new_array refcount to account for the new reference.
 553   new_array->increment_refcount();
 554   // Install new_array, ensuring its initialization is complete first.
 555   OrderAccess::release_store(&_active_array, new_array);
 556   // Wait for any readers that could read the old array from _active_array.
 557   _protect_active.write_synchronize();
 558   // All obtain critical sections that could see the old array have
 559   // completed, having incremented the refcount of the old array.  The
 560   // caller can now safely relinquish the old array.
 561 }
 562 
 563 // Atomically (with respect to replace_active_array) get the active array and
 564 // increment its refcount.  This provides safe access to the array,
 565 // even if an allocate operation expands and replaces the value of
 566 // _active_array.  The caller must relinquish the array when done
 567 // using it.
 568 OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
 569   uint enter_value = _protect_active.read_enter();
 570   ActiveArray* result = OrderAccess::load_acquire(&_active_array);
 571   result->increment_refcount();
 572   _protect_active.read_exit(enter_value);
 573   return result;
 574 }
 575 
 576 // Decrement refcount of array and destroy if refcount is zero.
 577 void OopStorage::relinquish_block_array(ActiveArray* array) const {
 578   if (array->decrement_refcount()) {
 579     assert(array != _active_array, "invariant");
 580     ActiveArray::destroy(array);
 581   }
 582 }
 583 
 584 class OopStorage::WithActiveArray : public StackObj {
 585   const OopStorage* _storage;
 586   ActiveArray* _active_array;
 587 
 588 public:
 589   WithActiveArray(const OopStorage* storage) :
 590     _storage(storage),
 591     _active_array(storage->obtain_active_array())
 592   {}
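
The rest of WithActiveArray is cut off in this excerpt; presumably a destructor pairs the obtain_active_array() call above with relinquish_block_array(), making the refcount scope-bound. A hedged usage sketch under that assumption (the destructor, the active_array() accessor, and the block_count()/at() calls are assumptions for illustration, not shown on this page):

// Sketch only: assumes WithActiveArray has a destructor that calls
// _storage->relinquish_block_array(_active_array), plus an accessor
// ActiveArray& active_array() const.
void visit_all_blocks(const OopStorage* storage) {
  OopStorage::WithActiveArray wa(storage);     // refcount taken on construction
  OopStorage::ActiveArray& array = wa.active_array();  // stable even if expanded concurrently
  for (size_t i = 0; i < array.block_count(); ++i) {
    // ... process array.at(i) ...
  }
}  // refcount dropped here; a superseded array may now be destroyed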




  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/oopStorage.inline.hpp"
  27 #include "gc/shared/oopStorageParState.inline.hpp"
  28 #include "logging/log.hpp"
  29 #include "logging/logStream.hpp"
  30 #include "memory/allocation.inline.hpp"
  31 #include "runtime/atomic.hpp"
  32 #include "runtime/globals.hpp"
  33 #include "runtime/handles.inline.hpp"
  34 #include "runtime/mutex.hpp"
  35 #include "runtime/mutexLocker.hpp"
  36 #include "runtime/orderAccess.inline.hpp"
  37 #include "runtime/safepoint.hpp"
  38 #include "runtime/stubRoutines.hpp"
  39 #include "runtime/thread.hpp"
  40 #include "utilities/align.hpp"
  41 #include "utilities/count_trailing_zeros.hpp"
  42 #include "utilities/debug.hpp"
  43 #include "utilities/globalCounter.inline.hpp"
  44 #include "utilities/globalDefinitions.hpp"
  45 #include "utilities/macros.hpp"
  46 #include "utilities/ostream.hpp"
  47 #include "utilities/spinYield.hpp"
  48 
  49 OopStorage::AllocateEntry::AllocateEntry() : _prev(NULL), _next(NULL) {}
  50 
  51 OopStorage::AllocateEntry::~AllocateEntry() {
  52   assert(_prev == NULL, "deleting attached block");
  53   assert(_next == NULL, "deleting attached block");
  54 }
  55 
  56 OopStorage::AllocateList::AllocateList(const AllocateEntry& (*get_entry)(const Block& block)) :
  57   _head(NULL), _tail(NULL), _get_entry(get_entry)
  58 {}
  59 
  60 OopStorage::AllocateList::~AllocateList() {
  61   // ~OopStorage() empties its lists before destroying them.
  62   assert(_head == NULL, "deleting non-empty block list");
  63   assert(_tail == NULL, "deleting non-empty block list");


 485 }
 486 
 487 // Create a new, larger active array with the same content as the
 488 // current array, then replace it, relinquishing the old array.
 489 // Return true if the array was successfully expanded, false to
 490 // indicate allocation failure.
 491 bool OopStorage::expand_active_array() {
 492   assert_lock_strong(_allocate_mutex);
 493   ActiveArray* old_array = _active_array;
 494   size_t new_size = 2 * old_array->size();
 495   log_info(oopstorage, blocks)("%s: expand active array " SIZE_FORMAT,
 496                                name(), new_size);
 497   ActiveArray* new_array = ActiveArray::create(new_size, AllocFailStrategy::RETURN_NULL);
 498   if (new_array == NULL) return false;
 499   new_array->copy_from(old_array);
 500   replace_active_array(new_array);
 501   relinquish_block_array(old_array);
 502   return true;
 503 }
 504 
 505 // Make new_array the _active_array.  Increments new_array's refcount
 506 // to account for the new reference.  The assignment is atomic with respect to
 507 // obtain_active_array; once this function returns, it is safe for the
 508 // caller to relinquish the old array.
 509 void OopStorage::replace_active_array(ActiveArray* new_array) {
 510   // Caller has the old array that is the current value of _active_array.
 511   // Update new_array refcount to account for the new reference.
 512   new_array->increment_refcount();
 513   // Install new_array, ensuring its initialization is complete first.
 514   OrderAccess::release_store(&_active_array, new_array);
 515   // Wait for any readers that could read the old array from _active_array.
 516   GlobalCounter::write_synchronize();
 517 // All obtain_active_array critical sections that could see the old array
 518   // have completed, having incremented the refcount of the old array.  The
 519   // caller can now safely relinquish the old array.
 520 }
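
GlobalCounter (included via utilities/globalCounter.inline.hpp above) replaces the hand-rolled ProtectActive scheme from the previous revision. As a rough mental model it is an epoch-based reader/writer synchronizer. Below is a deliberately simplified, self-contained sketch of the idea in portable C++ (fixed thread slots and default seq_cst ordering for clarity); the real GlobalCounter tracks a counter per thread and uses more carefully tuned memory ordering:

#include <atomic>
#include <thread>

// Simplified epoch-synchronizer model (illustrative, not HotSpot code).
constexpr int kMaxThreads = 64;
constexpr unsigned long kInactive = 0;

std::atomic<unsigned long> g_epoch{2};
std::atomic<unsigned long> g_slot[kMaxThreads];  // zero-initialized: all inactive

// Reader side: publish the epoch we entered under for the duration of
// the critical section (cf. GlobalCounter::CriticalSection).
void critical_section_begin(int tid) { g_slot[tid].store(g_epoch.load()); }
void critical_section_end(int tid)   { g_slot[tid].store(kInactive); }

// Writer side: bump the epoch, then wait until no reader is still inside
// a critical section that began under an older epoch
// (cf. GlobalCounter::write_synchronize).
void write_synchronize() {
  const unsigned long next = g_epoch.fetch_add(1) + 1;
  for (int i = 0; i < kMaxThreads; ++i) {
    unsigned long seen;
    while ((seen = g_slot[i].load()) != kInactive && seen < next) {
      std::this_thread::yield();
    }
  }
}

Because the release_store of _active_array above happens before write_synchronize(), any reader the wait does not cover can only have observed the new array.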
 521 
 522 // Atomically (with respect to replace_active_array) get the active array and
 523 // increment its refcount.  This provides safe access to the array,
 524 // even if an allocate operation expands and replaces the value of
 525 // _active_array.  The caller must relinquish the array when done
 526 // using it.
 527 OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
 528   GlobalCounter::CriticalSection cs(Thread::current());
 529   ActiveArray* result = OrderAccess::load_acquire(&_active_array);
 530   result->increment_refcount();

 531   return result;
 532 }
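
Note the ordering here: the refcount is taken while still inside the critical section. If the increment happened after the critical section ended, a concurrent replace_active_array() could complete its write_synchronize() in the gap, observe no remaining readers, and let the old array be relinquished and destroyed before the increment lands.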
 533 
 534 // Decrement refcount of array and destroy if refcount is zero.
 535 void OopStorage::relinquish_block_array(ActiveArray* array) const {
 536   if (array->decrement_refcount()) {
 537     assert(array != _active_array, "invariant");
 538     ActiveArray::destroy(array);
 539   }
 540 }
 541 
 542 class OopStorage::WithActiveArray : public StackObj {
 543   const OopStorage* _storage;
 544   ActiveArray* _active_array;
 545 
 546 public:
 547   WithActiveArray(const OopStorage* storage) :
 548     _storage(storage),
 549     _active_array(storage->obtain_active_array())
 550   {}

