
src/share/vm/gc/g1/heapRegionManager.cpp

rev 12309 : [mq]: 8169703-crash-with-alwayspretouch


 269   }
 270   num_regions = cur - *res_idx;
 271 #ifdef ASSERT
 272   for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
 273     assert(!is_available(i), "just checking");
 274   }
 275   assert(cur == max_length() || num_regions == 0 || is_available(cur),
 276          "The region at the current position %u must be available or at the end of the heap.", cur);
 277 #endif
 278   return num_regions;
 279 }
 280 
 281 uint HeapRegionManager::find_highest_free(bool* expanded) {
 282   // Loop downwards from the highest region index, looking for an
 283   // entry which is either free or not yet committed.  If not yet
 284   // committed, expand_at that index.
 285   uint curr = max_length() - 1;
 286   while (true) {
 287     HeapRegion *hr = _regions.get_by_index(curr);
 288     if (hr == NULL) {
 289       uint res = expand_at(curr, 1);
 290       if (res == 1) {
 291         *expanded = true;
 292         return curr;
 293       }
 294     } else {
 295       if (hr->is_free()) {
 296         *expanded = false;
 297         return curr;
 298       }
 299     }
 300     if (curr == 0) {
 301       return G1_NO_HRM_INDEX;
 302     }
 303     curr--;
 304   }
 305 }
 306 
 307 bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count) {
 308   size_t commits = 0;
 309   uint start_index = (uint)_regions.get_index_by_address(range.start());
 310   uint last_index = (uint)_regions.get_index_by_address(range.last());
 311 
 312   // Ensure that each G1 region in the range is free, returning false if not.
 313   // Commit those that are not yet available, and keep count.
 314   for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
 315     if (!is_available(curr_index)) {
 316       commits++;
 317       expand_at(curr_index, 1);
 318     }
 319     HeapRegion* curr_region  = _regions.get_by_index(curr_index);
 320     if (!curr_region->is_free()) {
 321       return false;
 322     }
 323   }
 324 
 325   allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
 326   *commit_count = commits;
 327   return true;
 328 }
 329 
 330 void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
 331   const uint start_index = hrclaimer->start_region_for_worker(worker_id);
 332 
 333   // Every worker will actually look at all regions, skipping over regions that
 334   // are currently not committed.
 335   // This also (potentially) iterates over regions newly allocated during GC. This
 336   // is no problem except for some extra work.
 337   const uint n_regions = hrclaimer->n_regions();




 269   }
 270   num_regions = cur - *res_idx;
 271 #ifdef ASSERT
 272   for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
 273     assert(!is_available(i), "just checking");
 274   }
 275   assert(cur == max_length() || num_regions == 0 || is_available(cur),
 276          "The region at the current position %u must be available or at the end of the heap.", cur);
 277 #endif
 278   return num_regions;
 279 }
 280 
 281 uint HeapRegionManager::find_highest_free(bool* expanded) {
 282   // Loop downwards from the highest region index, looking for an
 283   // entry which is either free or not yet committed.  If not yet
 284   // committed, expand_at that index.
 285   uint curr = max_length() - 1;
 286   while (true) {
 287     HeapRegion *hr = _regions.get_by_index(curr);
 288     if (hr == NULL) {
 289       uint res = expand_at(curr, 1, NULL);
 290       if (res == 1) {
 291         *expanded = true;
 292         return curr;
 293       }
 294     } else {
 295       if (hr->is_free()) {
 296         *expanded = false;
 297         return curr;
 298       }
 299     }
 300     if (curr == 0) {
 301       return G1_NO_HRM_INDEX;
 302     }
 303     curr--;
 304   }
 305 }
 306 
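For context, the three-argument expand_at call above implies that the declaration in heapRegionManager.hpp (not part of this hunk) now also takes a work gang. A minimal sketch of the assumed shape; the parameter names are assumptions:

    // Assumed declaration shape in heapRegionManager.hpp (not shown in this hunk).
    // Passing NULL for pretouch_workers presumably means the calling thread does
    // any pretouching itself rather than spreading it over a WorkGang.
    uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
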
 307 bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count, WorkGang* pretouch_workers) {
 308   size_t commits = 0;
 309   uint start_index = (uint)_regions.get_index_by_address(range.start());
 310   uint last_index = (uint)_regions.get_index_by_address(range.last());
 311 
 312   // Ensure that each G1 region in the range is free, returning false if not.
 313   // Commit those that are not yet available, and keep count.
 314   for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
 315     if (!is_available(curr_index)) {
 316       commits++;
 317       expand_at(curr_index, 1, pretouch_workers);
 318     }
 319     HeapRegion* curr_region  = _regions.get_by_index(curr_index);
 320     if (!curr_region->is_free()) {
 321       return false;
 322     }
 323   }
 324 
 325   allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
 326   *commit_count = commits;
 327   return true;
 328 }
 329 
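Callers of allocate_containing_regions now have to pass the pretouch work gang through as well. A sketch of a call site, assuming it lives in G1CollectedHeap where _hrm is the region manager and workers() returns the heap's WorkGang (those surrounding names are assumptions, not part of this hunk):

    // Hypothetical call-site sketch: hand the heap's work gang to the region
    // manager so regions committed for this range can be pretouched in parallel.
    size_t commit_count = 0;
    if (!_hrm.allocate_containing_regions(range, &commit_count, workers())) {
      // Some region in the range was not free; report the failure to the caller.
      return false;
    }
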
 330 void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
 331   const uint start_index = hrclaimer->start_region_for_worker(worker_id);
 332 
 333   // Every worker will actually look at all regions, skipping over regions that
 334   // are currently not committed.
 335   // This also (potentially) iterates over regions newly allocated during GC. This
 336   // is no problem except for some extra work.
 337   const uint n_regions = hrclaimer->n_regions();