
src/share/vm/gc/g1/heapRegionManager.cpp

 326   *commit_count = commits;
 327   return true;
 328 }
 329 
 330 void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
 331   const uint start_index = hrclaimer->start_region_for_worker(worker_id);
 332 
 333   // Every worker will actually look at all regions, skipping over regions that
 334   // are currently not committed.
 335   // This also (potentially) iterates over regions newly allocated during GC. This
 336   // is no problem except for some extra work.
 337   const uint n_regions = hrclaimer->n_regions();
 338   for (uint count = 0; count < n_regions; count++) {
 339     const uint index = (start_index + count) % n_regions;
 340     assert(index < n_regions, "sanity");
 341     // Skip over unavailable regions
 342     if (!is_available(index)) {
 343       continue;
 344     }
 345     HeapRegion* r = _regions.get_by_index(index);
 346     // We'll ignore "continues humongous" regions (we'll process them
 347     // when we come across their corresponding "starts humongous"
 348     // region) and regions already claimed.
 349     // However, if the iteration is specified as concurrent, the values for
 350     // is_starts_humongous and is_continues_humongous cannot be trusted,
 351     // and we should just blindly iterate over regions regardless of their
 352     // humongous status.
 353     if (hrclaimer->is_region_claimed(index) || (!concurrent && r->is_continues_humongous())) {
 354       continue;
 355     }
 356     // OK, try to claim it
 357     if (!hrclaimer->claim_region(index)) {
 358       continue;
 359     }
 360     // Success!
 361     // As mentioned above, special treatment of humongous regions can only be
 362     // done if we are iterating non-concurrently.
 363     if (!concurrent && r->is_starts_humongous()) {
 364       // If the region is "starts humongous", we'll iterate over its
 365       // "continues humongous" regions before the "starts humongous"
 366       // region itself. The order is important: calling the
 367       // closure on the "starts humongous" region might de-allocate
 368       // and clear all its "continues humongous" regions and, as a
 369       // result, we might end up processing them twice. So, we'll do
 370       // them first (note: most closures will ignore them anyway) and
 371       // then we'll do the "starts humongous" region.
 372       for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) {
 373         HeapRegion* chr = _regions.get_by_index(ch_index);
 374 
 375         assert(chr->is_continues_humongous(), "Must be humongous region");
 376         assert(chr->humongous_start_region() == r,
 377                "Must work on humongous continuation of the original start region "
 378                PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr));
 379         assert(!hrclaimer->is_region_claimed(ch_index),
 380                "Must not have been claimed yet because claiming of a humongous continuation first claims its start region");
 381 
 382         // Claim the region so no other worker tries to process the region. When a worker processes a
 383         // starts_humongous region it may also process the associated continues_humongous regions.
 384         // The continues_humongous regions can be changed to free regions. Unless this worker claims
 385         // all of these regions, other workers might try to claim and process these newly free regions.
 386         bool claim_result = hrclaimer->claim_region(ch_index);
 387         guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object");
 388 
 389         bool res2 = blk->doHeapRegion(chr);
 390         if (res2) {
 391           return;
 392         }
 393 
 394         // Right now, this holds (i.e., no closure that actually
 395         // does something with "continues humongous" regions
 396         // clears them). We might have to weaken it in the future,
 397         // but let's leave these two asserts here for extra safety.
 398         assert(chr->is_continues_humongous(), "should still be the case");
 399         assert(chr->humongous_start_region() == r, "sanity");
 400       }
 401     }
 402 
 403     bool res = blk->doHeapRegion(r);
 404     if (res) {
 405       return;
 406     }
 407   }
 408 }
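
A short aside on the claiming scheme used above: every worker walks the whole
region array but starts at its own offset, so the workers begin in different
parts of the heap and only overlap towards the end of their pass; a per-region
claim then guarantees that each region is handed to exactly one closure
invocation. The sketch below is a self-contained illustration of that pattern
using standard C++ atomics. It is an illustration only, not HotSpot code:
SimpleClaimer, par_iterate_regions and process_region are made-up names, and
details such as skipping uncommitted regions and the humongous handling above
are left out.

#include <atomic>
#include <cstdint>
#include <vector>

// Minimal stand-in for HeapRegionClaimer: one flag per region, flipped from
// Unclaimed (0) to Claimed (1) with a compare-and-swap so exactly one worker wins.
class SimpleClaimer {
 public:
  SimpleClaimer(uint32_t n_workers, uint32_t n_regions)
      : _n_workers(n_workers), _claims(n_regions) {
    for (auto& c : _claims) {
      c.store(0, std::memory_order_relaxed);  // 0 == Unclaimed
    }
  }
  uint32_t n_regions() const { return (uint32_t)_claims.size(); }
  // Spread the workers' starting points evenly across the region array.
  uint32_t start_region_for_worker(uint32_t worker_id) const {
    return (uint32_t)((uint64_t)n_regions() * worker_id / _n_workers);
  }
  bool is_region_claimed(uint32_t index) const {
    return _claims[index].load(std::memory_order_acquire) != 0;
  }
  bool claim_region(uint32_t index) {
    uint32_t expected = 0;  // only succeeds if the region was still Unclaimed
    return _claims[index].compare_exchange_strong(expected, 1,
                                                  std::memory_order_acq_rel);
  }
 private:
  uint32_t _n_workers;
  std::vector<std::atomic<uint32_t>> _claims;
};

// Shape of the loop in par_iterate: every worker looks at every region,
// starting at its own offset, and only processes the regions it claims.
// (The real code additionally skips regions that are not committed.)
template <typename ProcessRegion>
void par_iterate_regions(SimpleClaimer& claimer, uint32_t worker_id,
                         ProcessRegion process_region) {
  const uint32_t n = claimer.n_regions();
  const uint32_t start = claimer.start_region_for_worker(worker_id);
  for (uint32_t count = 0; count < n; count++) {
    const uint32_t index = (start + count) % n;
    if (claimer.is_region_claimed(index)) continue;  // cheap pre-check
    if (!claimer.claim_region(index)) continue;      // lost the race
    process_region(index);                           // we own this region now
  }
}

The is_region_claimed pre-check is only an optimization that avoids the more
expensive compare-and-swap on regions that are obviously taken; correctness
rests on claim_region alone, exactly as in the method above.
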
 409 
 410 uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
 411   assert(length() > 0, "the region sequence should not be empty");
 412   assert(length() <= _allocated_heapregions_length, "invariant");
 413   assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
 414   assert(num_regions_to_remove < length(), "We should never remove all regions");
 415 
 416   if (num_regions_to_remove == 0) {
 417     return 0;
 418   }
 419 
 420   uint removed = 0;
 421   uint cur = _allocated_heapregions_length - 1;
 422   uint idx_last_found = 0;


 491   for (uint i = 0; i < _allocated_heapregions_length; i++) {
 492     if (!is_available(i)) {
 493       prev_committed = false;
 494       continue;
 495     }
 496     num_committed++;
 497     HeapRegion* hr = _regions.get_by_index(i);
 498     guarantee(hr != NULL, "invariant: i: %u", i);
 499     guarantee(!prev_committed || hr->bottom() == prev_end,
 500               "invariant i: %u " HR_FORMAT " prev_end: " PTR_FORMAT,
 501               i, HR_FORMAT_PARAMS(hr), p2i(prev_end));
 502     guarantee(hr->hrm_index() == i,
 503               "invariant: i: %u hrm_index(): %u", i, hr->hrm_index());
 504     // Asserts will fire if i is >= _length
 505     HeapWord* addr = hr->bottom();
 506     guarantee(addr_to_region(addr) == hr, "sanity");
 507     // We cannot check whether the region is part of a particular set: at the time
 508     // this method may be called, we have only completed allocation of the regions,
 509     // but have not yet put them into a region set.
 510     prev_committed = true;
 511     if (hr->is_starts_humongous()) {
 512       prev_end = hr->orig_end();
 513     } else {
 514       prev_end = hr->end();
 515     }
 516   }
 517   for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
 518     guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i);
 519   }
 520 
 521   guarantee(num_committed == _num_committed, "Found %u committed regions, but should be %u", num_committed, _num_committed);
 522   _free_list.verify();
 523 }
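
A note on the orig_end() special case in the loop above: in this version of the
code a "starts humongous" region's end() is moved forward to cover the entire
humongous object, so for an object spanning several regions the next committed
region does not start at end() but at the region's physical boundary, which is
what orig_end() reports. (The updated version of this loop further down uses
end() unconditionally, presumably because there a region's end() is always its
physical boundary again.) A made-up numeric illustration, assuming a 1 MB
region size:

// Illustration only -- the addresses and the 1 MB region size are invented.
// A 3 MB humongous object allocated at 0x01000000 spans three regions:
//
//   region 16, "starts humongous":    bottom()   = 0x01000000
//                                     end()      = 0x01300000  (covers the whole object)
//                                     orig_end() = 0x01100000  (bottom() + 1 MB)
//   region 17, "continues humongous": bottom()   = 0x01100000
//   region 18, "continues humongous": bottom()   = 0x01200000
//
// The contiguity check therefore has to compare region 17's bottom() against
// region 16's orig_end(), not against its end().
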
 524 
 525 #ifndef PRODUCT
 526 void HeapRegionManager::verify_optional() {
 527   verify();
 528 }
 529 #endif // PRODUCT
 530 
 531 HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
 532     _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
 533   assert(n_workers > 0, "Need at least one worker.");
 534   _claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
 535   memset(_claims, Unclaimed, sizeof(*_claims) * _n_regions);




 326   *commit_count = commits;
 327   return true;
 328 }
 329 
 330 void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
 331   const uint start_index = hrclaimer->start_region_for_worker(worker_id);
 332 
 333   // Every worker will actually look at all regions, skipping over regions that
 334   // are currently not committed.
 335   // This also (potentially) iterates over regions newly allocated during GC. This
 336   // is no problem except for some extra work.
 337   const uint n_regions = hrclaimer->n_regions();
 338   for (uint count = 0; count < n_regions; count++) {
 339     const uint index = (start_index + count) % n_regions;
 340     assert(index < n_regions, "sanity");
 341     // Skip over unavailable regions
 342     if (!is_available(index)) {
 343       continue;
 344     }
 345     HeapRegion* r = _regions.get_by_index(index);
 346     // We'll ignore regions already claimed.
 347     // Humongous regions get no special treatment here: every region,
 348     // humongous or not, is claimed and handed to the closure
 349     // individually, so the claimed check below is also valid when the
 350     // iteration is concurrent.
 351     if (hrclaimer->is_region_claimed(index)) {
 352       continue;
 353     }
 354     // OK, try to claim it
 355     if (!hrclaimer->claim_region(index)) {
 356       continue;
 357     }
 358     bool res = blk->doHeapRegion(r);
 359     if (res) {
 360       return;
 361     }
 362   }
 363 }
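
For context on how this iteration is typically driven: all worker threads of a
parallel task share one claimer sized for the current number of regions, and
each worker passes its own worker_id together with the same closure. Below is a
minimal usage sketch that reuses the hypothetical SimpleClaimer and
par_iterate_regions helpers from the earlier sketch, with plain std::thread
standing in for a HotSpot work gang.

#include <cstdio>
#include <thread>
#include <vector>

void run_workers_example(uint32_t n_workers, uint32_t n_regions) {
  SimpleClaimer claimer(n_workers, n_regions);   // shared by all workers
  std::vector<std::thread> workers;
  for (uint32_t worker_id = 0; worker_id < n_workers; worker_id++) {
    workers.emplace_back([&claimer, worker_id] {
      // Every worker runs the same "closure"; the shared claimer guarantees
      // that each region index is processed by exactly one of them.
      par_iterate_regions(claimer, worker_id, [worker_id](uint32_t index) {
        std::printf("worker %u processed region %u\n",
                    (unsigned)worker_id, (unsigned)index);
      });
    });
  }
  for (auto& t : workers) {
    t.join();
  }
}

Because the claimer is created once per task and sized up front (see the
HeapRegionClaimer constructor below), the modulo wrap-around in the loop stays
within the array of claim flags for every worker.
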
 364 
 365 uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
 366   assert(length() > 0, "the region sequence should not be empty");
 367   assert(length() <= _allocated_heapregions_length, "invariant");
 368   assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
 369   assert(num_regions_to_remove < length(), "We should never remove all regions");
 370 
 371   if (num_regions_to_remove == 0) {
 372     return 0;
 373   }
 374 
 375   uint removed = 0;
 376   uint cur = _allocated_heapregions_length - 1;
 377   uint idx_last_found = 0;


 446   for (uint i = 0; i < _allocated_heapregions_length; i++) {
 447     if (!is_available(i)) {
 448       prev_committed = false;
 449       continue;
 450     }
 451     num_committed++;
 452     HeapRegion* hr = _regions.get_by_index(i);
 453     guarantee(hr != NULL, "invariant: i: %u", i);
 454     guarantee(!prev_committed || hr->bottom() == prev_end,
 455               "invariant i: %u " HR_FORMAT " prev_end: " PTR_FORMAT,
 456               i, HR_FORMAT_PARAMS(hr), p2i(prev_end));
 457     guarantee(hr->hrm_index() == i,
 458               "invariant: i: %u hrm_index(): %u", i, hr->hrm_index());
 459     // Asserts will fire if i is >= _length
 460     HeapWord* addr = hr->bottom();
 461     guarantee(addr_to_region(addr) == hr, "sanity");
 462     // We cannot check whether the region is part of a particular set: at the time
 463     // this method may be called, we have only completed allocation of the regions,
 464     // but have not yet put them into a region set.
 465     prev_committed = true;



 466     prev_end = hr->end();

 467   }
 468   for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
 469     guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i);
 470   }
 471 
 472   guarantee(num_committed == _num_committed, "Found %u committed regions, but should be %u", num_committed, _num_committed);
 473   _free_list.verify();
 474 }
 475 
 476 #ifndef PRODUCT
 477 void HeapRegionManager::verify_optional() {
 478   verify();
 479 }
 480 #endif // PRODUCT
 481 
 482 HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
 483     _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
 484   assert(n_workers > 0, "Need at least one worker.");
 485   _claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
 486   memset(_claims, Unclaimed, sizeof(*_claims) * _n_regions);
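
One detail worth calling out in the constructor above: memset fills bytes, not
uint elements, so initializing _claims this way only amounts to storing
Unclaimed into every slot if Unclaimed is a value whose bytes are all
identical, which here it presumably is (zero). A small stand-alone sketch of
the distinction (fill_claims and its marker parameter are invented for the
example):

#include <cstddef>
#include <cstdint>
#include <cstring>

// A byte-wise fill (memset) matches an element-wise fill only when every byte
// of the element value is the same; 0 qualifies, a marker such as 2 does not
// (memset with 2 would produce 0x02020202 in each 32-bit slot).
static void fill_claims(uint32_t* claims, size_t n, uint32_t marker) {
  if (marker == 0) {
    std::memset(claims, 0, sizeof(*claims) * n);  // same effect, and fast
  } else {
    for (size_t i = 0; i < n; i++) {              // general case
      claims[i] = marker;
    }
  }
}
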

