< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Print this page
rev 53865 : imported patch njt_iterate
rev 53868 : imported patch remove_shared_satb_lock


1659   // compressed oops mode.
1660 
1661   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1662                                                  heap_alignment);
1663 
1664   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1665 
1666   // Create the barrier set for the entire reserved region.
1667   G1CardTable* ct = new G1CardTable(reserved_region());
1668   ct->initialize();
1669   G1BarrierSet* bs = new G1BarrierSet(ct);
1670   bs->initialize();
1671   assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
1672   BarrierSet::set_barrier_set(bs);
1673   _card_table = ct;
1674 
1675   G1BarrierSet::satb_mark_queue_set().initialize(this,
1676                                                  SATB_Q_CBL_mon,
1677                                                  &bs->satb_mark_queue_buffer_allocator(),
1678                                                  G1SATBProcessCompletedThreshold,
1679                                                  G1SATBBufferEnqueueingThresholdPercent,
1680                                                  Shared_SATB_Q_lock);
1681 
1682   // process_completed_buffers_threshold and max_completed_buffers are updated
1683   // later, based on the concurrent refinement object.
1684   G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1685                                                   &bs->dirty_card_queue_buffer_allocator(),
1686                                                   Shared_DirtyCardQ_lock,
1687                                                   true); // init_free_ids
1688 
1689   dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1690                                     &bs->dirty_card_queue_buffer_allocator(),
1691                                     Shared_DirtyCardQ_lock);
1692 
1693   // Create the hot card cache.
1694   _hot_card_cache = new G1HotCardCache(this);
1695 
1696   // Carve out the G1 part of the heap.
1697   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1698   size_t page_size = actual_reserved_page_size(heap_rs);
1699   G1RegionToSpaceMapper* heap_storage =
1700     G1RegionToSpaceMapper::create_heap_mapper(g1_rs,


2587 
2588   HeapWord* result = op.result();
2589   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
2590   assert(result == NULL || ret_succeeded,
2591          "the result should be NULL if the VM did not succeed");
2592   *succeeded = ret_succeeded;
2593 
2594   assert_heap_not_locked();
2595   return result;
2596 }
2597 
2598 void G1CollectedHeap::do_concurrent_mark() {
2599   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2600   if (!_cm_thread->in_progress()) {
2601     _cm_thread->set_started();
2602     CGC_lock->notify();
2603   }
2604 }
2605 
2606 size_t G1CollectedHeap::pending_card_num() {
2607   size_t extra_cards = 0;
2608   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *curr = jtiwh.next(); ) {
2609     G1DirtyCardQueue& dcq = G1ThreadLocalData::dirty_card_queue(curr);
2610     extra_cards += dcq.size();

2611   }



2612   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
2613   size_t buffer_size = dcqs.buffer_size();
2614   size_t buffer_num = dcqs.completed_buffers_num();
2615 
2616   return buffer_size * buffer_num + extra_cards;
2617 }
2618 
2619 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
2620   // We don't nominate objects with many remembered set entries, on
2621   // the assumption that such objects are likely still live.
2622   HeapRegionRemSet* rem_set = r->rem_set();
2623 
2624   return G1EagerReclaimHumongousObjectsWithStaleRefs ?
2625          rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
2626          G1EagerReclaimHumongousObjects && rem_set->is_empty();
2627 }
2628 
2629 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
2630  private:
2631   size_t _total_humongous;
2632   size_t _candidate_humongous;
2633 
2634   G1DirtyCardQueue _dcq;
2635 
2636   bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {




1659   // compressed oops mode.
1660 
1661   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1662                                                  heap_alignment);
1663 
1664   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1665 
1666   // Create the barrier set for the entire reserved region.
1667   G1CardTable* ct = new G1CardTable(reserved_region());
1668   ct->initialize();
1669   G1BarrierSet* bs = new G1BarrierSet(ct);
1670   bs->initialize();
1671   assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
1672   BarrierSet::set_barrier_set(bs);
1673   _card_table = ct;
1674 
1675   G1BarrierSet::satb_mark_queue_set().initialize(this,
1676                                                  SATB_Q_CBL_mon,
1677                                                  &bs->satb_mark_queue_buffer_allocator(),
1678                                                  G1SATBProcessCompletedThreshold,
1679                                                  G1SATBBufferEnqueueingThresholdPercent);

1680 
1681   // process_completed_buffers_threshold and max_completed_buffers are updated
1682   // later, based on the concurrent refinement object.
1683   G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1684                                                   &bs->dirty_card_queue_buffer_allocator(),
1685                                                   Shared_DirtyCardQ_lock,
1686                                                   true); // init_free_ids
1687 
1688   dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1689                                     &bs->dirty_card_queue_buffer_allocator(),
1690                                     Shared_DirtyCardQ_lock);
1691 
1692   // Create the hot card cache.
1693   _hot_card_cache = new G1HotCardCache(this);
1694 
1695   // Carve out the G1 part of the heap.
1696   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1697   size_t page_size = actual_reserved_page_size(heap_rs);
1698   G1RegionToSpaceMapper* heap_storage =
1699     G1RegionToSpaceMapper::create_heap_mapper(g1_rs,


2586 
2587   HeapWord* result = op.result();
2588   bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
2589   assert(result == NULL || ret_succeeded,
2590          "the result should be NULL if the VM did not succeed");
2591   *succeeded = ret_succeeded;
2592 
2593   assert_heap_not_locked();
2594   return result;
2595 }
2596 
2597 void G1CollectedHeap::do_concurrent_mark() {
// Request the start of a concurrent marking cycle: flag the concurrent mark
// thread as started unless a cycle is already in progress. The lock guards
// the in_progress/started state transition.
2598   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2599   if (!_cm_thread->in_progress()) {
2600     _cm_thread->set_started();
// Wake waiters on CGC_lock — presumably the CM thread — so the newly
// requested cycle is picked up. (NOTE(review): waiter identity assumed.)
2601     CGC_lock->notify();
2602   }
2603 }
2604 
2605 size_t G1CollectedHeap::pending_card_num() {
2606   struct CountCardsClosure : public ThreadClosure {
2607     size_t _cards;
2608     CountCardsClosure() : _cards(0) {}
2609     virtual void do_thread(Thread* t) {
2610       _cards += G1ThreadLocalData::dirty_card_queue(t).size();
2611     }
2612   } count_from_threads;
2613   Threads::threads_do(&count_from_threads);
2614 
2615   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
2616   size_t buffer_size = dcqs.buffer_size();
2617   size_t buffer_num = dcqs.completed_buffers_num();
2618 
2619   return buffer_size * buffer_num + count_from_threads._cards;
2620 }
2621 
2622 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
2623   // We don't nominate objects with many remembered set entries, on
2624   // the assumption that such objects are likely still live.
2625   HeapRegionRemSet* rem_set = r->rem_set();
2626 
// With stale-ref reclaim enabled, a region qualifies as long as its
// remembered set is small (occupancy at most G1RSetSparseRegionEntries);
// otherwise it qualifies only when eager reclaim is enabled AND the
// remembered set is completely empty.
2627   return G1EagerReclaimHumongousObjectsWithStaleRefs ?
2628          rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
2629          G1EagerReclaimHumongousObjects && rem_set->is_empty();
2630 }
2631 
2632 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
2633  private:
2634   size_t _total_humongous;
2635   size_t _candidate_humongous;
2636 
2637   G1DirtyCardQueue _dcq;
2638 
2639   bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {


< prev index next >