size_t ShenandoahHeap::used() const {
  return Atomic::load_acquire(&_used);
}

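// _committed is only changed under the heap lock or at a safepoint (see the
// asserts in increase_committed/decrease_committed below); readers outside
// the lock issue an acquire fence before loading it.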
size_t ShenandoahHeap::committed() const {
  OrderAccess::acquire();
  return _committed;
}

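// Committed-size updates are serialized by the heap lock or a safepoint,
// so plain (non-atomic) arithmetic is sufficient here.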
void ShenandoahHeap::increase_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes);
}

void ShenandoahHeap::set_used(size_t bytes) {
  Atomic::release_store_fence(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(&_used, bytes);
}

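// Bumped lock-free for every mutator allocation, including wasted space;
// tracks bytes allocated since the current GC cycle started.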
void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes);
}

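// Wasted allocations still count toward the allocation counter and, under
// pacing, are claimed against the pacer, but they do not increase used().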
void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}

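// Applies a thread-safe region closure to all heap regions in parallel:
// each worker repeatedly claims a stride of regions from a shared cursor.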
class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionClosure* const _blk;

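  // Shared claim cursor, padded to its own cache line to avoid false sharing.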
  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
  volatile size_t _index;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);

public:
  ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
          AbstractGangTask("Parallel Region Task"),
          _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}

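  // Each worker claims [cur, cur + stride) with an atomic bump of _index and
  // applies the closure to the regions in its claimed window.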
  void work(uint worker_id) {
    size_t stride = ShenandoahParallelRegionStride;

    size_t max = _heap->num_regions();
    while (_index < max) {
      size_t cur = Atomic::add(&_index, stride) - stride;
      size_t start = cur;
      size_t end = MIN2(cur + stride, max);
      if (start >= max) break;

      for (size_t i = cur; i < end; i++) {
        ShenandoahHeapRegion* current = _heap->get_region(i);
        _blk->heap_region_do(current);
      }
    }
  }
};

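// Parallel iteration only pays off when there is more than one stride of
// regions; smaller heaps fall back to serial iteration on the calling thread.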
void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
  assert(blk->is_thread_safe(), "Only thread-safe closures here");
  if (num_regions() > ShenandoahParallelRegionStride) {
    ShenandoahParallelHeapRegionTask task(blk);
    workers()->run_task(&task);
  } else {
    heap_region_iterate(blk);
  }
}