src/share/vm/gc/g1/heapRegionManager.cpp

--- old/src/share/vm/gc/g1/heapRegionManager.cpp

 225 HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
 226   guarantee(r != NULL, "Start region must be a valid region");
 227   guarantee(is_available(r->hrm_index()), "Trying to iterate starting from region %u which is not in the heap", r->hrm_index());
 228   for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
 229     HeapRegion* hr = _regions.get_by_index(i);
 230     if (is_available(i)) {
 231       return hr;
 232     }
 233   }
 234   return NULL;
 235 }
 236 
 237 void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
 238   uint len = max_length();
 239 
 240   for (uint i = 0; i < len; i++) {
 241     if (!is_available(i)) {
 242       continue;
 243     }
 244     guarantee(at(i) != NULL, "Tried to access region %u that has a NULL HeapRegion*", i);
 245     bool res = blk->doHeapRegion(at(i));
 246     if (res) {
 247       blk->incomplete();
 248       return;
 249     }
 250   }
 251 }
 252 
 253 uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
 254   guarantee(res_idx != NULL, "checking");
 255   guarantee(start_idx <= (max_length() + 1), "checking");
 256 
 257   uint num_regions = 0;
 258 
 259   uint cur = start_idx;
 260   while (cur < max_length() && is_available(cur)) {
 261     cur++;
 262   }
 263   if (cur == max_length()) {
 264     return num_regions;
 265   }


 338   for (uint count = 0; count < n_regions; count++) {
 339     const uint index = (start_index + count) % n_regions;
 340     assert(index < n_regions, "sanity");
 341     // Skip over unavailable regions
 342     if (!is_available(index)) {
 343       continue;
 344     }
 345     HeapRegion* r = _regions.get_by_index(index);
 346     // We'll ignore regions already claimed.
 347     // However, if the iteration is specified as concurrent, the values for
 348     // is_starts_humongous and is_continues_humongous can not be trusted,
 349     // and we should just blindly iterate over regions regardless of their
 350     // humongous status.
 351     if (hrclaimer->is_region_claimed(index)) {
 352       continue;
 353     }
 354     // OK, try to claim it
 355     if (!hrclaimer->claim_region(index)) {
 356       continue;
 357     }
 358     bool res = blk->doHeapRegion(r);
 359     if (res) {
 360       return;
 361     }
 362   }
 363 }
 364 
 365 uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
 366   assert(length() > 0, "the region sequence should not be empty");
 367   assert(length() <= _allocated_heapregions_length, "invariant");
 368   assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
 369   assert(num_regions_to_remove < length(), "We should never remove all regions");
 370 
 371   if (num_regions_to_remove == 0) {
 372     return 0;
 373   }
 374 
 375   uint removed = 0;
 376   uint cur = _allocated_heapregions_length - 1;
 377   uint idx_last_found = 0;
 378   uint num_last_found = 0;

+++ new/src/share/vm/gc/g1/heapRegionManager.cpp

 225 HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
 226   guarantee(r != NULL, "Start region must be a valid region");
 227   guarantee(is_available(r->hrm_index()), "Trying to iterate starting from region %u which is not in the heap", r->hrm_index());
 228   for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
 229     HeapRegion* hr = _regions.get_by_index(i);
 230     if (is_available(i)) {
 231       return hr;
 232     }
 233   }
 234   return NULL;
 235 }
 236 
 237 void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
 238   uint len = max_length();
 239 
 240   for (uint i = 0; i < len; i++) {
 241     if (!is_available(i)) {
 242       continue;
 243     }
 244     guarantee(at(i) != NULL, "Tried to access region %u that has a NULL HeapRegion*", i);
 245     blk->doHeapRegion(at(i));
 246   }
 247 }
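
The non-abortable variant above simply applies the closure to every committed region. A minimal sketch of such a closure follows; the closure headers are not part of this diff, so the void return type of doHeapRegion in the split hierarchy is an assumption, and CountRegionsClosure is a hypothetical name used only for illustration.

// Hypothetical closure for the non-abortable iterate().
class CountRegionsClosure : public HeapRegionClosure {
  uint _count;
 public:
  CountRegionsClosure() : _count(0) { }
  // Assumed signature: with abort semantics moved to
  // AbortableHeapRegionClosure, doHeapRegion needs no return value.
  void doHeapRegion(HeapRegion* r) {
    _count++;  // every committed region is visited exactly once
  }
  uint count() const { return _count; }
};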
 248 
 249 void HeapRegionManager::iterate(AbortableHeapRegionClosure* blk) const {
 250   uint len = max_length();
 251 
 252   for (uint i = 0; i < len; i++) {
 253     if (!is_available(i)) {
 254       continue;
 255     }
 256     guarantee(at(i) != NULL, "Tried to access region %u that has a NULL HeapRegion*", i);
 257     bool res = blk->doHeapRegionAbortable(at(i));
 258     if (res) {
 259       blk->incomplete();
 260       return;
 261     }
 262   }
 263 }
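
The abortable variant keeps the old contract: returning true from the closure stops the iteration, and iterate() marks the closure incomplete. A minimal sketch, assuming AbortableHeapRegionClosure declares virtual bool doHeapRegionAbortable(HeapRegion* r) (its header is not shown in this diff) and that HeapRegion::is_free() is available:

// Hypothetical closure: finds the first free region, then aborts.
class FindFreeRegionClosure : public AbortableHeapRegionClosure {
  HeapRegion* _result;
 public:
  FindFreeRegionClosure() : _result(NULL) { }
  bool doHeapRegionAbortable(HeapRegion* r) {
    if (r->is_free()) {
      _result = r;  // remember the hit ...
      return true;  // ... and abort; iterate() will call incomplete()
    }
    return false;   // keep iterating
  }
  HeapRegion* result() const { return _result; }
};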
 264 
 265 uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
 266   guarantee(res_idx != NULL, "checking");
 267   guarantee(start_idx <= (max_length() + 1), "checking");
 268 
 269   uint num_regions = 0;
 270 
 271   uint cur = start_idx;
 272   while (cur < max_length() && is_available(cur)) {
 273     cur++;
 274   }
 275   if (cur == max_length()) {
 276     return num_regions;
 277   }


 350   for (uint count = 0; count < n_regions; count++) {
 351     const uint index = (start_index + count) % n_regions;
 352     assert(index < n_regions, "sanity");
 353     // Skip over unavailable regions
 354     if (!is_available(index)) {
 355       continue;
 356     }
 357     HeapRegion* r = _regions.get_by_index(index);
 358     // We'll ignore regions already claimed.
 359     // However, if the iteration is specified as concurrent, the values for
 360     // is_starts_humongous and is_continues_humongous can not be trusted,
 361     // and we should just blindly iterate over regions regardless of their
 362     // humongous status.
 363     if (hrclaimer->is_region_claimed(index)) {
 364       continue;
 365     }
 366     // OK, try to claim it
 367     if (!hrclaimer->claim_region(index)) {
 368       continue;
 369     }
 370     blk->doHeapRegion(r);
 371   }
 372 }
 373 
 374 void HeapRegionManager::par_iterate(AbortableHeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
 375   const uint start_index = hrclaimer->start_region_for_worker(worker_id);
 376 
 377   // Every worker will actually look at all regions, skipping over regions that
 378   // are currently not committed.
 379   // This also (potentially) iterates over regions newly allocated during GC. This
 380   // is no problem except for some extra work.
 381   const uint n_regions = hrclaimer->n_regions();
 382   for (uint count = 0; count < n_regions; count++) {
 383     const uint index = (start_index + count) % n_regions;
 384     assert(index < n_regions, "sanity");
 385     // Skip over unavailable regions
 386     if (!is_available(index)) {
 387       continue;
 388     }
 389     HeapRegion* r = _regions.get_by_index(index);
 390     // We'll ignore regions already claimed.
 391     // However, if the iteration is specified as concurrent, the values for
 392     // is_starts_humongous and is_continues_humongous can not be trusted,
 393     // and we should just blindly iterate over regions regardless of their
 394     // humongous status.
 395     if (hrclaimer->is_region_claimed(index)) {
 396       continue;
 397     }
 398     // OK, try to claim it
 399     if (!hrclaimer->claim_region(index)) {
 400       continue;
 401     }
 402     bool res = blk->doHeapRegionAbortable(r);
 403     if (res) {
 404       return;
 405     }
 406   }
 407 }
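
In both par_iterate() variants, every worker walks all n_regions slots but starts at its own offset and wraps around modulo n_regions, so workers begin in disjoint parts of the array and the claimer resolves any overlap. A sketch of that idea, assuming the usual HeapRegionClaimer shape of this vintage (its definition lives in heapRegionManager.hpp, not in this diff) and the old Atomic::cmpxchg(exchange_value, dest, compare_value) argument order:

// Hypothetical, simplified claimer: evenly striped start offsets plus one
// atomically flipped flag per region, so each region is handed to exactly
// one worker regardless of how the workers' walks interleave.
uint HeapRegionClaimer::start_region_for_worker(uint worker_id) const {
  assert(worker_id < _n_workers, "Invalid worker_id.");
  return _n_regions * worker_id / _n_workers;
}

bool HeapRegionClaimer::claim_region(uint region_index) {
  assert(region_index < _n_regions, "Invalid index.");
  uint old_val = Atomic::cmpxchg(Claimed, &_claims[region_index], Unclaimed);
  return old_val == Unclaimed;  // true only for the single winning worker
}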
 408 
 409 uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
 410   assert(length() > 0, "the region sequence should not be empty");
 411   assert(length() <= _allocated_heapregions_length, "invariant");
 412   assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
 413   assert(num_regions_to_remove < length(), "We should never remove all regions");
 414 
 415   if (num_regions_to_remove == 0) {
 416     return 0;
 417   }
 418 
 419   uint removed = 0;
 420   uint cur = _allocated_heapregions_length - 1;
 421   uint idx_last_found = 0;
 422   uint num_last_found = 0;