  size_t buffer_size = dcqs.buffer_size();
  size_t buffer_num = dcqs.completed_buffers_num();

  // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
  // in bytes - not the number of 'entries'. We need to convert
  // into a number of cards.
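  // (Illustrative arithmetic, not values from the source: with 10
  // completed buffers of 2048 bytes each and extra_cards == 64 bytes,
  // the result is (2048 * 10 + 64) / 8 == 2568 cards on a 64-bit VM,
  // where oopSize == 8.)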
  return (buffer_size * buffer_num + extra_cards) / oopSize;
}

size_t G1CollectedHeap::cards_scanned() {
  return g1_rem_set()->cardsScanned();
}

class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
 private:
  size_t _total_humongous;
  size_t _candidate_humongous;

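  // Dirty card queue used to flush the (small) remembered sets of
  // nominated regions into the global dirty card queue set, so any
  // stale entries are re-examined during the evacuation that follows
  // (see the comment in doHeapRegion below).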
  DirtyCardQueue _dcq;

  // We don't nominate objects with many remembered set entries, on
  // the assumption that such objects are likely still live.
  bool is_remset_small(HeapRegion* region) const {
    HeapRegionRemSet* const rset = region->rem_set();
    return G1EagerReclaimHumongousObjectsWithStaleRefs
           ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
           : rset->is_empty();
  }

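  // A humongous object always starts at the bottom of its first
  // region, so bottom() is the address of the object itself.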
  bool is_typeArray_region(HeapRegion* region) const {
    return oop(region->bottom())->is_typeArray();
  }

  bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
    assert(region->is_starts_humongous(), "Must start a humongous object");

    if (!heap->mark_in_progress()
        || (region->bottom() >= region->next_top_at_mark_start())) {
      // In order to maintain SATB invariants, during concurrent mark
      // we should only nominate an object containing references if it
      // was allocated after the start of marking, as such an object
      // doesn't need to have its references scanned.
      //
      // Also, we must not reclaim an object that is in the concurrent
      // mark stack. Objects allocated since the start of marking are
      // never added to the mark stack.

      // However, we presently only nominate is_typeArray() objects.
      // A humongous object containing references induces remembered
      // set entries on other regions. In order to reclaim such an
      // object, those remembered sets would need to be cleaned up.
      return is_typeArray_region(region) && is_remset_small(region);

    } else {
      // We may allow nomination of is_typeArray() objects that were
      // allocated before the start of concurrent marking. For this
      // we rely on mark stack insertion to exclude is_typeArray()
      // objects, preventing reclaiming an object that is in the mark
      // stack. Frequent allocation and drop of large binary blobs is
      // an important use case for eager reclaim, and this special
      // handling may reduce needed headroom.
      return G1EagerReclaimHumongousPreSnapshotTypeArrays
             && is_typeArray_region(region)
             && is_remset_small(region);
    }
  }

 public:
  RegisterHumongousWithInCSetFastTestClosure()
  : _total_humongous(0),
    _candidate_humongous(0),
    _dcq(&JavaThread::dirty_card_queue_set()) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
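    // Returning false keeps the region iteration going; only the
    // first region of each humongous object needs to be inspected.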
    if (!r->is_starts_humongous()) {
      return false;
    }
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    if (!humongous_region_is_candidate(g1h, r)) {
      g1h->remove_humongous_reclaim_candidate(r->hrm_index());
    } else {
      // Is_candidate already filters out humongous objects with large remembered sets.