  // Do some deferred update processing every time we allocate.
  // Continue processing deferred updates if _allocation_list is empty,
  // in the hope that we'll get a block from that, rather than
  // allocating a new block.
  while (reduce_deferred_updates() && (_allocation_list.head() == NULL)) {}

  // Use the first block in _allocation_list for the allocation.
  Block* block = _allocation_list.head();
  if (block == NULL) {
    // No available blocks; make a new one, and add to storage.
    {
      MutexUnlockerEx mul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
      block = Block::new_block(this);
    }
    if (block == NULL) {
      while (_allocation_list.head() == NULL) {
        if (!reduce_deferred_updates()) {
          // Failed to make new block, no other thread made a block
          // available while the mutex was released, and didn't get
          // one from a deferred update either, so return failure.
          log_debug(oopstorage, blocks)("%s: failed block allocation", name());
          return NULL;
        }
      }
    } else {
      // Add new block to storage.
      log_debug(oopstorage, blocks)("%s: new block " PTR_FORMAT, name(), p2i(block));

      // Add new block to the _active_array, growing if needed.
      if (!_active_array->push(block)) {
        if (expand_active_array()) {
          guarantee(_active_array->push(block), "push failed after expansion");
        } else {
          log_debug(oopstorage, blocks)("%s: failed active array expand", name());
          Block::delete_block(*block);
          return NULL;
        }
      }
      // Add to end of _allocation_list. The mutex release allowed
      // other threads to add blocks to the _allocation_list. We prefer
      // to allocate from non-empty blocks, to allow empty blocks to
      // be deleted.
      _allocation_list.push_back(*block);
    }
    block = _allocation_list.head();
  }
  // Allocate from first block.
  assert(block != NULL, "invariant");
  assert(!block->is_full(), "invariant");
  if (block->is_empty()) {
    // Transitioning from empty to not empty.
    log_debug(oopstorage, blocks)("%s: block not empty " PTR_FORMAT, name(), p2i(block));
  }
  oop* result = block->allocate();
  assert(result != NULL, "allocation failed");
  assert(!block->is_empty(), "postcondition");
  Atomic::inc(&_allocation_count); // release updates outside lock.
  if (block->is_full()) {
    // Transitioning from not full to full.
    // Remove full blocks from consideration by future allocates.
    log_debug(oopstorage, blocks)("%s: block full " PTR_FORMAT, name(), p2i(block));
    _allocation_list.unlink(*block);
  }
  log_trace(oopstorage, ref)("%s: allocated " PTR_FORMAT, name(), p2i(result));
  return result;
}
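
// Usage sketch (illustrative only, not from the original file): a client
// treats the returned oop* as a stable slot.  The entry must be cleared
// again before release() (check_release_entry below asserts this).  The
// raw stores here are simplified stand-ins for whatever access path a
// real client uses.
//
//   oop* slot = storage->allocate();
//   if (slot != NULL) {
//     *slot = obj;             // publish a reference
//     ...
//     *slot = NULL;            // clear the entry ...
//     storage->release(slot);  // ... before releasing it
//   }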

// Create a new, larger, active array with the same content as the
// current array, and then replace, relinquishing the old array.
// Return true if the array was successfully expanded, false to
// indicate allocation failure.
bool OopStorage::expand_active_array() {
  assert_lock_strong(_allocation_mutex);
  ActiveArray* old_array = _active_array;
  size_t new_size = 2 * old_array->size();
  log_debug(oopstorage, blocks)("%s: expand active array " SIZE_FORMAT,
                                name(), new_size);
  ActiveArray* new_array = ActiveArray::create(new_size, AllocFailStrategy::RETURN_NULL);
  if (new_array == NULL) return false;
  new_array->copy_from(old_array);
  replace_active_array(new_array);
  relinquish_block_array(old_array);
  return true;
}
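
// Note: the old array is relinquished rather than deleted immediately;
// readers may have obtained it from _active_array without holding a
// lock, so it can only be reclaimed once its refcount shows that no
// such readers remain (see replace_active_array below).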

// Make new_array the _active_array. Increments new_array's refcount
// to account for the new reference. The assignment is atomic with
// respect to obtain_active_array; once this function returns, it is
// safe for the caller to relinquish the old array.
void OopStorage::replace_active_array(ActiveArray* new_array) {
  // Caller has the old array that is the current value of _active_array.
  // Update new_array refcount to account for the new reference.
  new_array->increment_refcount();
  // Install new_array, ensuring its initialization is complete first.
  OrderAccess::release_store(&_active_array, new_array);
  // Wait for any readers that could read the old array from _active_array.

// ... (elided; the following fragment is the tail of
// reduce_deferred_updates) ...
  // Move empty block to end of list, for possible deletion.
  if (is_empty_bitmask(allocated)) {
    _allocation_list.unlink(*block);
    _allocation_list.push_back(*block);
  }

  log_debug(oopstorage, blocks)("%s: processed deferred update " PTR_FORMAT,
                                name(), p2i(block));
  return true;              // Processed one pending update.
}

inline void check_release_entry(const oop* entry) {
  assert(entry != NULL, "Releasing NULL");
  assert(*entry == NULL, "Releasing uncleared entry: " PTR_FORMAT, p2i(entry));
}

void OopStorage::release(const oop* ptr) {
  check_release_entry(ptr);
  Block* block = find_block_or_null(ptr);
  assert(block != NULL, "%s: invalid release " PTR_FORMAT, name(), p2i(ptr));
  log_trace(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptr));
  block->release_entries(block->bitmask_for_entry(ptr), &_deferred_updates);
  Atomic::dec(&_allocation_count);
}
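
// Note: release takes no lock and does not touch _allocation_list
// directly.  release_entries clears the entry's bit in the block's
// allocated bitmask and, when the block's allocation-list state may
// need updating, queues the block on _deferred_updates; allocate()
// later drains that queue under _allocation_mutex via
// reduce_deferred_updates.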

void OopStorage::release(const oop* const* ptrs, size_t size) {
  size_t i = 0;
  while (i < size) {
    check_release_entry(ptrs[i]);
    Block* block = find_block_or_null(ptrs[i]);
    assert(block != NULL, "%s: invalid release " PTR_FORMAT, name(), p2i(ptrs[i]));
    log_trace(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptrs[i]));
    size_t count = 0;
    uintx releasing = 0;
    for ( ; i < size; ++i) {
      const oop* entry = ptrs[i];
      check_release_entry(entry);
      // If entry not in block, finish block and resume outer loop with entry.
      if (!block->contains(entry)) break;
      // Add entry to releasing bitmap.
      log_trace(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(entry));
      uintx entry_bitmask = block->bitmask_for_entry(entry);
      assert((releasing & entry_bitmask) == 0,
             "Duplicate entry: " PTR_FORMAT, p2i(entry));
      releasing |= entry_bitmask;
      ++count;
    }
    // Release the contiguous entries that are in block.
    block->release_entries(releasing, &_deferred_updates);
    Atomic::sub(count, &_allocation_count);
  }
}

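// Make a heap-allocated copy of the storage name; it is freed with
// FREE_C_HEAP_ARRAY in ~OopStorage().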
const char* dup_name(const char* name) {
  char* dup = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtGC);
  strcpy(dup, name);
  return dup;
}

const size_t initial_active_array_size = 8;

OopStorage::OopStorage(const char* name,
                       Mutex* allocation_mutex,
                       Mutex* active_mutex) :
  _name(dup_name(name)),
  _active_array(ActiveArray::create(initial_active_array_size)),
  _allocation_list(),
  _deferred_updates(NULL),
  _allocation_mutex(allocation_mutex),
  _active_mutex(active_mutex),
  _allocation_count(0),
  _concurrent_iteration_active(false)
{
  _active_array->increment_refcount();
  assert(_active_mutex->rank() < _allocation_mutex->rank(),
         "%s: active_mutex must have lower rank than allocation_mutex", _name);
  assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
         "%s: active mutex requires safepoint check", _name);
  assert(_allocation_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
         "%s: allocation mutex requires safepoint check", _name);
}

void OopStorage::delete_empty_block(const Block& block) {
  assert(block.is_empty(), "discarding non-empty block");
  log_debug(oopstorage, blocks)("%s: delete empty block " PTR_FORMAT, name(), p2i(&block));
  Block::delete_block(block);
}

OopStorage::~OopStorage() {
  Block* block;
  // Unlink any pending deferred updates.
  while ((block = _deferred_updates) != NULL) {
    _deferred_updates = block->deferred_updates_next();
    block->set_deferred_updates_next(NULL);
  }
  // Empty the allocation list.
  while ((block = _allocation_list.head()) != NULL) {
    _allocation_list.unlink(*block);
  }
  // Drop our reference to the active array, which must be the last one.
  bool unreferenced = _active_array->decrement_refcount();
  assert(unreferenced, "deleting storage while _active_array is referenced");
  // Delete all blocks, then the array itself and the copied name.
  for (size_t i = _active_array->block_count(); 0 < i; ) {
    block = _active_array->at(--i);
    Block::delete_block(*block);
  }
  ActiveArray::destroy(_active_array);
  FREE_C_HEAP_ARRAY(char, _name);
}

// ... (elided; the following fragment is from a BasicParState
// segment-claiming method) ...
  // We can cope with the uncertainty by recomputing start/end from
  // the result of the add, and dealing with potential overshoot.
  size_t end = Atomic::add(step, &_next_block);
  // _next_block may have changed, so recompute start from result of add.
  start = end - step;
  // _next_block may have changed so much that end has overshot.
  end = MIN2(end, _block_count);
  // _next_block may have changed so much that even start has overshot.
  if (start < _block_count) {
    // Record claimed segment for iteration.
    data->_segment_start = start;
    data->_segment_end = end;
    return true;                // Success.
  } else {
    // No more blocks to claim.
    return finish_iteration(data);
  }
}
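
// Example of the overshoot handling above: with _block_count = 10,
// _next_block = 8 and step = 4, the add yields end = 12; start is
// recomputed as 8 and end is clamped to 10, so the claimed segment is
// [8, 10).  A second thread doing the same add would get start = 12,
// which fails the start < _block_count test and ends its iteration.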

bool OopStorage::BasicParState::finish_iteration(const IterationData* data) const {
  log_info(oopstorage, blocks, stats)
          ("Parallel iteration on %s: blocks = " SIZE_FORMAT
           ", processed = " SIZE_FORMAT " (%2.f%%)",
           _storage->name(), _block_count, data->_processed,
           percent_of(data->_processed, _block_count));
  return false;
}
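
// finish_iteration only logs per-iteration statistics; it always
// returns false, so the claiming code above can simply return its
// result to report that nothing more was claimed.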

const char* OopStorage::name() const { return _name; }

#ifndef PRODUCT

void OopStorage::print_on(outputStream* st) const {
  size_t allocations = _allocation_count;
  size_t blocks = _active_array->block_count();

  double data_size = section_size * section_count;
  double alloc_percentage = percent_of((double)allocations, blocks * data_size);

  st->print("%s: " SIZE_FORMAT " entries in " SIZE_FORMAT " blocks (%2.f%%), " SIZE_FORMAT " bytes",
            name(), allocations, blocks, alloc_percentage, total_memory_usage());
}