src/hotspot/share/gc/g1/g1CollectedHeap.cpp
rev 49184 : imported patch 8197569-refactor-eager-reclaim
rev 49185 : [mq]: 8197569-stefanj-review


2565   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2566   if (!_cmThread->in_progress()) {
2567     _cmThread->set_started();
2568     CGC_lock->notify();
2569   }
2570 }
2571 
2572 size_t G1CollectedHeap::pending_card_num() {
2573   size_t extra_cards = 0;
2574   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *curr = jtiwh.next(); ) {
2575     DirtyCardQueue& dcq = curr->dirty_card_queue();
2576     extra_cards += dcq.size();
2577   }
2578   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2579   size_t buffer_size = dcqs.buffer_size();
2580   size_t buffer_num = dcqs.completed_buffers_num();
2581 
2582   return buffer_size * buffer_num + extra_cards;
2583 }
2584 
2585 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
2586  private:
2587   size_t _total_humongous;
2588   size_t _candidate_humongous;
2589 
2590   DirtyCardQueue _dcq;
2591 
2592   // We don't nominate objects with many remembered set entries, on
2593   // the assumption that such objects are likely still live.
2594   bool is_remset_small(HeapRegion* region) const {
2595     HeapRegionRemSet* const rset = region->rem_set();
2596     return G1EagerReclaimHumongousObjectsWithStaleRefs
2597       ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
2598       : rset->is_empty();
2599   }
2600 
2601   bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
2602     assert(region->is_starts_humongous(), "Must start a humongous object");
2603 
2604     oop obj = oop(region->bottom());
2605 
2606     // Dead objects cannot be eager reclaim candidates. Due to class
2607   // unloading, it is unsafe to query their classes, so we return early.
2608     if (heap->is_obj_dead(obj, region)) {
2609       return false;
2610     }
2611 
2612     // Candidate selection must satisfy the following constraints
2613     // while concurrent marking is in progress:
2614     //
2615     // * In order to maintain SATB invariants, an object must not be
2616     // reclaimed if it was allocated before the start of marking and
2617     // has not had its references scanned.  Such an object must have
2618     // its references (including type metadata) scanned to ensure no
2619     // live objects are missed by the marking process.  Objects
2620     // allocated after the start of concurrent marking don't need to
2621     // be scanned.
2622     //
2623     // * An object must not be reclaimed if it is on the concurrent
2624     // mark stack.  Objects allocated after the start of concurrent
2625     // marking are never pushed on the mark stack.
2626     //
2627     // Nominating only objects allocated after the start of concurrent
2628     // marking is sufficient to meet both constraints.  This may miss
2629     // some objects that satisfy the constraints, but the marking data
2630     // structures don't support efficiently performing the needed
2631     // additional tests or scrubbing of the mark stack.
2632     //
2633     // However, we presently only nominate is_typeArray() objects.
2634     // A humongous object containing references induces remembered
2635     // set entries on other regions.  In order to reclaim such an
2636     // object, those remembered sets would need to be cleaned up.
2637     //
2638     // We also treat is_typeArray() objects specially, allowing them
2639     // to be reclaimed even if allocated before the start of
2640     // concurrent mark.  For this we rely on mark stack insertion to
2641     // exclude is_typeArray() objects, preventing reclaiming an object
2642     // that is in the mark stack.  We also rely on the metadata for
2643     // such objects to be built-in and thus guaranteed to stay live.
2644     // Frequent allocation and drop of large binary blobs is an
2645     // important use case for eager reclaim, and this special handling
2646     // may reduce needed headroom.
2647 
2648     return obj->is_typeArray() && is_remset_small(region);
2649   }
2650 
2651  public:
2652   RegisterHumongousWithInCSetFastTestClosure()
2653   : _total_humongous(0),
2654     _candidate_humongous(0),
2655     _dcq(&JavaThread::dirty_card_queue_set()) {
2656   }
2657 
2658   virtual bool do_heap_region(HeapRegion* r) {
2659     if (!r->is_starts_humongous()) {
2660       return false;
2661     }
2662     G1CollectedHeap* g1h = G1CollectedHeap::heap();
2663 
2664     bool is_candidate = humongous_region_is_candidate(g1h, r);
2665     uint rindex = r->hrm_index();
2666     g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
2667     if (is_candidate) {
2668       _candidate_humongous++;


4800                                obj->is_typeArray()
4801                               );
4802       return false;
4803     }
4804 
4805     guarantee(obj->is_typeArray(),
4806               "Only eagerly reclaiming type arrays is supported, but the object "
4807               PTR_FORMAT " is not.", p2i(r->bottom()));
4808 
4809     log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
4810                              region_idx,
4811                              (size_t)obj->size() * HeapWordSize,
4812                              p2i(r->bottom()),
4813                              r->rem_set()->occupied(),
4814                              r->rem_set()->strong_code_roots_list_length(),
4815                              next_bitmap->is_marked(r->bottom()),
4816                              g1h->is_humongous_reclaim_candidate(region_idx),
4817                              obj->is_typeArray()
4818                             );
4819 
4820     // Need to clear mark bit of the humongous object if already set.
4821     if (next_bitmap->is_marked(r->bottom())) {
4822       next_bitmap->clear(r->bottom());
4823     }
4824     _humongous_objects_reclaimed++;
4825     do {
4826       HeapRegion* next = g1h->next_region_in_humongous(r);
4827       _freed_bytes += r->used();
4828       r->set_containing_set(NULL);
4829       _humongous_regions_reclaimed++;
4830       g1h->free_humongous_region(r, _free_region_list, false /* skip_remset */ );
4831       r = next;
4832     } while (r != NULL);
4833 
4834     return false;
4835   }
4836 
4837   uint humongous_objects_reclaimed() {
4838     return _humongous_objects_reclaimed;
4839   }
4840 
4841   uint humongous_regions_reclaimed() {
4842     return _humongous_regions_reclaimed;
4843   }




2565   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2566   if (!_cmThread->in_progress()) {
2567     _cmThread->set_started();
2568     CGC_lock->notify();
2569   }
2570 }
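
The fragment above wakes the concurrent mark thread: the started flag is set and the notify issued while CGC_lock is held, so the wakeup cannot be missed by a thread re-checking its state under the same lock. A minimal sketch of that handshake using standard primitives, with hypothetical names rather than HotSpot code:

  #include <condition_variable>
  #include <mutex>

  std::mutex cgc_mutex;               // stands in for CGC_lock
  std::condition_variable cgc_cv;
  bool cm_started = false;            // stands in for _cmThread->in_progress()

  void signal_concurrent_mark() {
    std::lock_guard<std::mutex> guard(cgc_mutex);
    if (!cm_started) {
      cm_started = true;              // corresponds to _cmThread->set_started()
      cgc_cv.notify_one();            // corresponds to CGC_lock->notify()
    }
  }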
2571 
2572 size_t G1CollectedHeap::pending_card_num() {
2573   size_t extra_cards = 0;
2574   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *curr = jtiwh.next(); ) {
2575     DirtyCardQueue& dcq = curr->dirty_card_queue();
2576     extra_cards += dcq.size();
2577   }
2578   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2579   size_t buffer_size = dcqs.buffer_size();
2580   size_t buffer_num = dcqs.completed_buffers_num();
2581 
2582   return buffer_size * buffer_num + extra_cards;
2583 }
2584 
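
pending_card_num() above estimates the number of dirty cards awaiting refinement: every completed buffer is counted at full capacity, and the cards still sitting in each Java thread's local queue are added on top. A worked example with made-up numbers, not taken from the patch:

  #include <cstddef>

  // Hypothetical values: 64 completed buffers of 256 cards each, plus
  // 120 cards spread across the thread-local dirty card queues.
  size_t buffer_size = 256;
  size_t buffer_num  = 64;
  size_t extra_cards = 120;
  size_t pending     = buffer_size * buffer_num + extra_cards;  // 16504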
2585 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
2586   // We don't nominate objects with many remembered set entries, on
2587   // the assumption that such objects are likely still live.
2588   HeapRegionRemSet* rem_set = r->rem_set();
2589 
2590   return G1EagerReclaimHumongousObjectsWithStaleRefs ?
2591          rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
2592          G1EagerReclaimHumongousObjects && rem_set->is_empty();
2593 }
2594 
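The helper above, hoisted out of the closure to replace the old is_remset_small(), decides eligibility purely from the remembered set occupancy and two flags; note that the new version also consults G1EagerReclaimHumongousObjects in the -StaleRefs case, which the old helper did not. A standalone sketch of the same decision, with hypothetical parameter names standing in for the flags and the G1RSetSparseRegionEntries threshold:

  #include <cstddef>

  // 'stale_refs' stands for G1EagerReclaimHumongousObjectsWithStaleRefs,
  // 'eager_reclaim' for G1EagerReclaimHumongousObjects, and
  // 'sparse_limit' for G1RSetSparseRegionEntries.
  static bool is_potential_candidate(bool stale_refs, bool eager_reclaim,
                                     size_t remset_entries, size_t sparse_limit) {
    return stale_refs ? remset_entries <= sparse_limit
                      : (eager_reclaim && remset_entries == 0);
  }
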
2595 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
2596  private:
2597   size_t _total_humongous;
2598   size_t _candidate_humongous;
2599 
2600   DirtyCardQueue _dcq;
2601 
2602   bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {
2603     assert(region->is_starts_humongous(), "Must start a humongous object");
2604 
2605     oop obj = oop(region->bottom());
2606 
2607     // Dead objects cannot be eager reclaim candidates. Due to class
2608   // unloading, it is unsafe to query their classes, so we return early.
2609     if (g1h->is_obj_dead(obj, region)) {
2610       return false;
2611     }
2612 
2613     // Candidate selection must satisfy the following constraints
2614     // while concurrent marking is in progress:
2615     //
2616     // * In order to maintain SATB invariants, an object must not be
2617     // reclaimed if it was allocated before the start of marking and
2618     // has not had its references scanned.  Such an object must have
2619     // its references (including type metadata) scanned to ensure no
2620     // live objects are missed by the marking process.  Objects
2621     // allocated after the start of concurrent marking don't need to
2622     // be scanned.
2623     //
2624     // * An object must not be reclaimed if it is on the concurrent
2625     // mark stack.  Objects allocated after the start of concurrent
2626     // marking are never pushed on the mark stack.
2627     //
2628     // Nominating only objects allocated after the start of concurrent
2629     // marking is sufficient to meet both constraints.  This may miss
2630     // some objects that satisfy the constraints, but the marking data
2631     // structures don't support efficiently performing the needed
2632     // additional tests or scrubbing of the mark stack.
2633     //
2634     // However, we presently only nominate is_typeArray() objects.
2635     // A humongous object containing references induces remembered
2636     // set entries on other regions.  In order to reclaim such an
2637     // object, those remembered sets would need to be cleaned up.
2638     //
2639     // We also treat is_typeArray() objects specially, allowing them
2640     // to be reclaimed even if allocated before the start of
2641     // concurrent mark.  For this we rely on mark stack insertion to
2642     // exclude is_typeArray() objects, preventing reclaiming an object
2643     // that is in the mark stack.  We also rely on the metadata for
2644     // such objects to be built-in and thus guaranteed to stay live.
2645     // Frequent allocation and drop of large binary blobs is an
2646     // important use case for eager reclaim, and this special handling
2647     // may reduce needed headroom.
2648 
2649     return obj->is_typeArray() &&
2650            g1h->is_potential_eager_reclaim_candidate(region);
2651   }
2652 
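Summarizing the policy documented above for a few hypothetical humongous objects (a reviewer's reading of the comments, not part of the patch):

  // object                remembered set                 nominated?
  // --------------------  -----------------------------  ----------
  // byte[] (typeArray)    empty                          yes
  // byte[] (typeArray)    <= G1RSetSparseRegionEntries   yes, if +G1EagerReclaimHumongousObjectsWithStaleRefs
  // byte[] (typeArray)    larger than that               no (likely still live)
  // Object[] (has refs)   any                            no (induces remset entries; may sit on the mark stack)
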
2653  public:
2654   RegisterHumongousWithInCSetFastTestClosure()
2655   : _total_humongous(0),
2656     _candidate_humongous(0),
2657     _dcq(&JavaThread::dirty_card_queue_set()) {
2658   }
2659 
2660   virtual bool do_heap_region(HeapRegion* r) {
2661     if (!r->is_starts_humongous()) {
2662       return false;
2663     }
2664     G1CollectedHeap* g1h = G1CollectedHeap::heap();
2665 
2666     bool is_candidate = humongous_region_is_candidate(g1h, r);
2667     uint rindex = r->hrm_index();
2668     g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
2669     if (is_candidate) {
2670       _candidate_humongous++;


4802                                obj->is_typeArray()
4803                               );
4804       return false;
4805     }
4806 
4807     guarantee(obj->is_typeArray(),
4808               "Only eagerly reclaiming type arrays is supported, but the object "
4809               PTR_FORMAT " is not.", p2i(r->bottom()));
4810 
4811     log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
4812                              region_idx,
4813                              (size_t)obj->size() * HeapWordSize,
4814                              p2i(r->bottom()),
4815                              r->rem_set()->occupied(),
4816                              r->rem_set()->strong_code_roots_list_length(),
4817                              next_bitmap->is_marked(r->bottom()),
4818                              g1h->is_humongous_reclaim_candidate(region_idx),
4819                              obj->is_typeArray()
4820                             );
4821 
4822     g1h->concurrent_mark()->humongous_object_eagerly_reclaimed(r);
4823     _humongous_objects_reclaimed++;
4824     do {
4825       HeapRegion* next = g1h->next_region_in_humongous(r);
4826       _freed_bytes += r->used();
4827       r->set_containing_set(NULL);
4828       _humongous_regions_reclaimed++;
4829       g1h->free_humongous_region(r, _free_region_list, false /* skip_remset */ );
4830       r = next;
4831     } while (r != NULL);
4832 
4833     return false;
4834   }
4835 
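The do/while above walks the whole humongous object: the chain starts at the is_starts_humongous() region, and next_region_in_humongous() yields each continuation region until it returns NULL. A toy model of the walk (hypothetical types, not HotSpot code), showing why next must be fetched before the region is freed:

  struct Region { Region* next_in_humongous; };

  static void free_chain(Region* r) {
    do {
      Region* next = r->next_in_humongous;  // grab the link first...
      delete r;                             // ...because freeing invalidates r
      r = next;
    } while (r != nullptr);
  }
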
4836   uint humongous_objects_reclaimed() {
4837     return _humongous_objects_reclaimed;
4838   }
4839 
4840   uint humongous_regions_reclaimed() {
4841     return _humongous_regions_reclaimed;
4842   }

