
src/share/vm/gc_implementation/shenandoah/shenandoahFreeSet.cpp

rev 10674 : [backport] Move ShenandoahAllocType and ShenandoahAllocRequest to separate file
rev 10676 : [backport] Remove ShHeap::region_in_collection_set in favor of SHR::in_cset
rev 10690 : [backport] Cleanup header files and forward declarations
rev 10712 : [backport] Fix compilation errors due to missing spaces between string literal and macro
rev 10715 : [backport] Clean up superfluous newlines
rev 10772 : [backport] Update copyrights

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2016, Red Hat, Inc. and/or its affiliates.
+ * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
  *

@@ -20,10 +20,11 @@
  * questions.
  *
  */
 
 #include "precompiled.hpp"
+
 #include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
 
 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
         _heap(heap),

@@ -36,12 +37,12 @@
 
 void ShenandoahFreeSet::increase_used(size_t num_bytes) {
   assert_heaplock_owned_by_current_thread();
   _used += num_bytes;
 
-  assert(_used <= _capacity, err_msg("must not use more than we have: used: "SIZE_FORMAT
-                                     ", capacity: "SIZE_FORMAT", num_bytes: "SIZE_FORMAT,
+  assert(_used <= _capacity, err_msg("must not use more than we have: used: " SIZE_FORMAT
+                                     ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT,
                                      _used, _capacity, num_bytes));
 }
 
 bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
   assert (idx < _max,

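A note on the SIZE_FORMAT change above (rev 10712): C++11 parses a string literal immediately followed by an identifier as a user-defined literal, so "used: "SIZE_FORMAT stops being macro expansion plus literal concatenation and becomes a compile error. A minimal standalone sketch, with SIZE_FORMAT assumed here to expand to a plain printf format (the actual HotSpot definition differs in detail):

    #include <cstdio>
    #include <cstddef>

    // Stand-in for HotSpot's SIZE_FORMAT; assumed here to expand to "%zu".
    #define SIZE_FORMAT "%zu"

    int main() {
      size_t used = 42;
      printf("used: " SIZE_FORMAT "\n", used);   // OK: adjacent literals concatenate
      // printf("used: "SIZE_FORMAT "\n", used); // C++11 error: "used: "SIZE_FORMAT
      //                                         // parses as a user-defined literal
      return 0;
    }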
@@ -55,11 +56,11 @@
           err_msg("index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
                   idx, _max, _collector_leftmost, _collector_rightmost));
   return _collector_free_bitmap.at(idx);
 }
 
-HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahHeap::ShenandoahAllocationRequest& req, bool& in_new_region) {
+HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
   // Scan the bitmap looking for a first fit.
   //
   // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently. Normally,
   // we would find the region to allocate in right away.
   //

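The comment above describes the cached-bounds walk. Outside the HotSpot bitmap API, the idea reduces to a first-fit scan clamped to cached bounds on the set bits. A simplified sketch with free-standing names, not the real ShenandoahFreeSet fields:

    #include <vector>
    #include <cstddef>

    // Simplified first-fit over a free bitmap. Keeping leftmost/rightmost
    // bounds on the set bits means the scan never touches the (usually
    // large) all-zero prefix and suffix of the bitmap.
    struct FreeSetSketch {
      std::vector<bool> free_bitmap;
      size_t leftmost;   // lowest index that may be free
      size_t rightmost;  // highest index that may be free

      // Returns the first free index, or free_bitmap.size() if none.
      size_t first_fit() const {
        for (size_t idx = leftmost; idx <= rightmost && idx < free_bitmap.size(); idx++) {
          if (free_bitmap[idx]) return idx;  // normally hits near leftmost
        }
        return free_bitmap.size();
      }
    };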
@@ -69,12 +70,12 @@
   //
   // Free set maintains mutator and collector views, and normally they allocate in their views only,
   // unless we have special cases for stealing and mixed allocations.
 
   switch (req.type()) {
-    case ShenandoahHeap::_alloc_tlab:
-    case ShenandoahHeap::_alloc_shared: {
+    case ShenandoahAllocRequest::_alloc_tlab:
+    case ShenandoahAllocRequest::_alloc_shared: {
 
       // Try to allocate in the mutator view
       for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
         if (is_mutator_free(idx)) {
           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);

@@ -85,12 +86,12 @@
       }
 
       // There is no recovery. Mutator does not touch collector view at all.
       break;
     }
-    case ShenandoahHeap::_alloc_gclab:
-    case ShenandoahHeap::_alloc_shared_gc: {
+    case ShenandoahAllocRequest::_alloc_gclab:
+    case ShenandoahAllocRequest::_alloc_shared_gc: {
       // size_t is unsigned, need to dodge underflow when _leftmost = 0
 
       // Fast-path: try to allocate in the collector view first
       for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
         size_t idx = c - 1;

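The loop shape above (c = _collector_rightmost + 1; c > _collector_leftmost; c--) is the standard dodge for the underflow noted in the comment: a naive descending size_t loop wraps around at zero instead of terminating. A standalone sketch of the pattern:

    #include <cstdio>
    #include <cstddef>

    // Visit indices [leftmost, rightmost] from right to left without
    // underflowing an unsigned counter when leftmost == 0.
    static void reverse_scan(size_t leftmost, size_t rightmost) {
      for (size_t c = rightmost + 1; c > leftmost; c--) {
        size_t idx = c - 1;  // the index actually visited
        printf("visiting %zu\n", idx);
      }
      // The naive form `for (size_t i = rightmost; i >= leftmost; i--)`
      // never terminates when leftmost == 0: i wraps to SIZE_MAX.
    }

    int main() {
      reverse_scan(0, 3);  // prints 3, 2, 1, 0 and stops
      return 0;
    }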
@@ -141,11 +142,11 @@
   }
 
   return NULL;
 }
 
-HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahHeap::ShenandoahAllocationRequest& req, bool& in_new_region) {
+HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
   assert (!has_no_alloc_capacity(r), err_msg("Performance: should avoid full regions on this path: " SIZE_FORMAT, r->region_number()));
 
   try_recycle_trashed(r);
 
   in_new_region = r->is_empty();

@@ -235,11 +236,11 @@
   while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
     _collector_rightmost--;
   }
 }
 
-HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahHeap::ShenandoahAllocationRequest& req) {
+HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
   assert_heaplock_owned_by_current_thread();
 
   size_t words_size = req.size();
   size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 

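allocate_contiguous serves humongous requests by claiming num adjacent regions, and required_regions is, in effect, a ceiling division of the byte size by the region size. A hedged sketch of that computation (assumed form, not copied from the source; the real helper lives in ShenandoahHeapRegion):

    #include <cstddef>

    // Ceiling division: how many fixed-size regions cover size_bytes.
    static size_t required_regions_sketch(size_t size_bytes, size_t region_size_bytes) {
      return (size_bytes + region_size_bytes - 1) / region_size_bytes;
    }

    // Example: a 5 MB request with 2 MB regions needs 3 contiguous regions.
    // required_regions_sketch(5u << 20, 2u << 20) == 3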
@@ -403,11 +404,11 @@
   clear();
 
   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
     ShenandoahHeapRegion* region = _heap->get_region(idx);
     if (region->is_alloc_allowed() || region->is_trash()) {
-      assert(!region->in_collection_set(), "Shouldn't be adding those to the free set");
+      assert(!region->is_cset(), "Shouldn't be adding those to the free set");
 
       // Do not add regions that would surely fail allocation
       if (has_no_alloc_capacity(region)) continue;
 
       _capacity += alloc_capacity(region);

@@ -517,22 +518,22 @@
                   total_free / M, collector_count(), max / K);
     }
   }
 }
 
-HeapWord* ShenandoahFreeSet::allocate(ShenandoahHeap::ShenandoahAllocationRequest& req, bool& in_new_region) {
+HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
   assert_heaplock_owned_by_current_thread();
   assert_bounds();
 
   if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
     switch (req.type()) {
-      case ShenandoahHeap::_alloc_shared:
-      case ShenandoahHeap::_alloc_shared_gc:
+      case ShenandoahAllocRequest::_alloc_shared:
+      case ShenandoahAllocRequest::_alloc_shared_gc:
         in_new_region = true;
         return allocate_contiguous(req);
-      case ShenandoahHeap::_alloc_gclab:
-      case ShenandoahHeap::_alloc_tlab:
+      case ShenandoahAllocRequest::_alloc_gclab:
+      case ShenandoahAllocRequest::_alloc_tlab:
         in_new_region = false;
         assert(false, err_msg("Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
                               req.size(), ShenandoahHeapRegion::humongous_threshold_words()));
         return NULL;
       default:

@@ -578,12 +579,10 @@
 #ifdef ASSERT
 void ShenandoahFreeSet::assert_heaplock_owned_by_current_thread() const {
   _heap->assert_heaplock_owned_by_current_thread();
 }
 
-
-
 void ShenandoahFreeSet::assert_heaplock_not_owned_by_current_thread() const {
   _heap->assert_heaplock_not_owned_by_current_thread();
 }
 
 void ShenandoahFreeSet::assert_bounds() const {