< prev index next >

src/share/vm/gc/g1/g1ConcurrentMark.cpp

Print this page
rev 13100 : imported patch 8182169-arrayallocator-should-take-memflag-parameter


 127 void G1CMBitMap::clear_range(MemRegion mr) {
 128   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 129   assert(!mr.is_empty(), "unexpected empty region");
 130   // convert address range into offset range
 131   _bm.at_put_range(heapWordToOffset(mr.start()),
 132                    heapWordToOffset(mr.end()), false);
 133 }
 134 
 135 G1CMMarkStack::G1CMMarkStack() :
 136   _max_chunk_capacity(0),
 137   _base(NULL),
 138   _chunk_capacity(0) {
 139   set_empty();
 140 }
 141 
 142 bool G1CMMarkStack::resize(size_t new_capacity) {
 143   assert(is_empty(), "Only resize when stack is empty.");
 144   assert(new_capacity <= _max_chunk_capacity,
 145          "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
 146 
 147   TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::allocate_or_null(new_capacity);
 148 
 149   if (new_base == NULL) {
 150     log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
 151     return false;
 152   }
 153   // Release old mapping.
 154   if (_base != NULL) {
 155     MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::free(_base, _chunk_capacity);
 156   }
 157 
 158   _base = new_base;
 159   _chunk_capacity = new_capacity;
 160   set_empty();
 161 
 162   return true;
 163 }
 164 
 165 size_t G1CMMarkStack::capacity_alignment() {
 166   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
 167 }
 168 
 169 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
 170   guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
 171 
 172   size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
 173 
 174   _max_chunk_capacity = (size_t)align_size_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 175   size_t initial_chunk_capacity = (size_t)align_size_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;


 188 void G1CMMarkStack::expand() {
 189   if (_chunk_capacity == _max_chunk_capacity) {
 190     log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
 191     return;
 192   }
 193   size_t old_capacity = _chunk_capacity;
 194   // Double capacity if possible
 195   size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);
 196 
 197   if (resize(new_capacity)) {
 198     log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 199                   old_capacity, new_capacity);
 200   } else {
 201     log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 202                     old_capacity, new_capacity);
 203   }
 204 }
 205 
 206 G1CMMarkStack::~G1CMMarkStack() {
 207   if (_base != NULL) {
 208     MmapArrayAllocator<TaskQueueEntryChunk, mtGC>::free(_base, _chunk_capacity);
 209   }
 210 }
 211 
// Push 'elem' onto the front of the singly-linked list headed by '*list'.
// The head is only declared 'volatile' and is not updated atomically here;
// callers (add_chunk_to_chunk_list / add_chunk_to_free_list) serialize
// access with a mutex, so this must only run with that lock held.
void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}
 216 
// Push 'elem' onto the global list of filled chunks and bump its length
// counter, both under MarkStackChunkList_lock (taken without a safepoint
// check).
void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}
 222 
// Return 'elem' to the free-chunk list, under MarkStackFreeList_lock
// (taken without a safepoint check). No length counter is kept for the
// free list.
void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}
 227 
 228 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {




 127 void G1CMBitMap::clear_range(MemRegion mr) {
 128   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 129   assert(!mr.is_empty(), "unexpected empty region");
 130   // convert address range into offset range
 131   _bm.at_put_range(heapWordToOffset(mr.start()),
 132                    heapWordToOffset(mr.end()), false);
 133 }
 134 
 135 G1CMMarkStack::G1CMMarkStack() :
 136   _max_chunk_capacity(0),
 137   _base(NULL),
 138   _chunk_capacity(0) {
 139   set_empty();
 140 }
 141 
 142 bool G1CMMarkStack::resize(size_t new_capacity) {
 143   assert(is_empty(), "Only resize when stack is empty.");
 144   assert(new_capacity <= _max_chunk_capacity,
 145          "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity);
 146 
 147   TaskQueueEntryChunk* new_base = MmapArrayAllocator<TaskQueueEntryChunk>::allocate_or_null(new_capacity, mtGC);
 148 
 149   if (new_base == NULL) {
 150     log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(TaskQueueEntryChunk));
 151     return false;
 152   }
 153   // Release old mapping.
 154   if (_base != NULL) {
 155     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 156   }
 157 
 158   _base = new_base;
 159   _chunk_capacity = new_capacity;
 160   set_empty();
 161 
 162   return true;
 163 }
 164 
 165 size_t G1CMMarkStack::capacity_alignment() {
 166   return (size_t)lcm(os::vm_allocation_granularity(), sizeof(TaskQueueEntryChunk)) / sizeof(G1TaskQueueEntry);
 167 }
 168 
 169 bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) {
 170   guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized.");
 171 
 172   size_t const TaskEntryChunkSizeInVoidStar = sizeof(TaskQueueEntryChunk) / sizeof(G1TaskQueueEntry);
 173 
 174   _max_chunk_capacity = (size_t)align_size_up(max_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;
 175   size_t initial_chunk_capacity = (size_t)align_size_up(initial_capacity, capacity_alignment()) / TaskEntryChunkSizeInVoidStar;


 188 void G1CMMarkStack::expand() {
 189   if (_chunk_capacity == _max_chunk_capacity) {
 190     log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity);
 191     return;
 192   }
 193   size_t old_capacity = _chunk_capacity;
 194   // Double capacity if possible
 195   size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity);
 196 
 197   if (resize(new_capacity)) {
 198     log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 199                   old_capacity, new_capacity);
 200   } else {
 201     log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks",
 202                     old_capacity, new_capacity);
 203   }
 204 }
 205 
 206 G1CMMarkStack::~G1CMMarkStack() {
 207   if (_base != NULL) {
 208     MmapArrayAllocator<TaskQueueEntryChunk>::free(_base, _chunk_capacity);
 209   }
 210 }
 211 
// Push 'elem' onto the front of the singly-linked list headed by '*list'.
// The head is only declared 'volatile' and is not updated atomically here;
// callers (add_chunk_to_chunk_list / add_chunk_to_free_list) serialize
// access with a mutex, so this must only run with that lock held.
void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
  elem->next = *list;
  *list = elem;
}
 216 
// Push 'elem' onto the global list of filled chunks and bump its length
// counter, both under MarkStackChunkList_lock (taken without a safepoint
// check).
void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_chunk_list, elem);
  _chunks_in_chunk_list++;
}
 222 
// Return 'elem' to the free-chunk list, under MarkStackFreeList_lock
// (taken without a safepoint check). No length counter is kept for the
// free list.
void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
  MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag);
  add_chunk_to_list(&_free_list, elem);
}
 227 
 228 G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {


< prev index next >