1660 // base of the reserved heap may end up differing from the
1661 // address that was requested (i.e. the preferred heap base).
1662 // If this happens then we could end up using a non-optimal
1663 // compressed oops mode.
1664
// NOTE(review): numbered listing of the interior of what appears to be
// G1CollectedHeap::initialize() in HotSpot; the enclosing function begins
// before and ends after this fragment -- confirm against the full file.
// Reserve the whole heap up front, aligned to HeapAlignment.
1665 ReservedSpace heap_rs = Universe::reserve_heap(reserved_byte_size,
1666 HeapAlignment);
1667
// Record [base, base + size) as the heap's reserved region.
1668 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1669
1670 // Create the barrier set for the entire reserved region.
1671 G1CardTable* ct = new G1CardTable(reserved_region());
1672 ct->initialize();
1673 G1BarrierSet* bs = new G1BarrierSet(ct);
1674 bs->initialize();
1675 assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
1676 BarrierSet::set_barrier_set(bs);
1677 _card_table = ct;
1678
// Wire up the SATB (snapshot-at-the-beginning) mark queue set with its
// monitor, buffer allocator, and processing/enqueueing thresholds.
1679 G1BarrierSet::satb_mark_queue_set().initialize(this,
1680 SATB_Q_CBL_mon,
1681 &bs->satb_mark_queue_buffer_allocator(),
1682 G1SATBProcessCompletedThreshold,
1683 G1SATBBufferEnqueueingThresholdPercent);
1684
1685 // process_completed_buffers_threshold and max_completed_buffers are updated
1686 // later, based on the concurrent refinement object.
1687 G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1688 &bs->dirty_card_queue_buffer_allocator(),
1689 true); // init_free_ids
1690
// This heap's own dirty card queue set shares the same monitor and
// buffer allocator as the global barrier-set queue set above.
1691 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1692 &bs->dirty_card_queue_buffer_allocator());
1693
1694 // Create the hot card cache.
1695 _hot_card_cache = new G1HotCardCache(this);
1696
1697 // Carve out the G1 part of the heap.
1698 ReservedSpace g1_rs = heap_rs.first_part(reserved_byte_size);
// page_size reflects the actual page size backing the reservation, which
// may differ from the requested one (statement continues past this chunk).
1699 size_t page_size = actual_reserved_page_size(heap_rs);
1700 G1RegionToSpaceMapper* heap_storage =
|
1660 // base of the reserved heap may end up differing from the
1661 // address that was requested (i.e. the preferred heap base).
1662 // If this happens then we could end up using a non-optimal
1663 // compressed oops mode.
1664
// NOTE(review): numbered listing of the interior of what appears to be
// G1CollectedHeap::initialize() in HotSpot; the enclosing function begins
// before and ends after this fragment -- confirm against the full file.
// Reserve the whole heap up front, aligned to HeapAlignment.
1665 ReservedSpace heap_rs = Universe::reserve_heap(reserved_byte_size,
1666 HeapAlignment);
1667
// Record [base, base + size) as the heap's reserved region.
1668 initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1669
1670 // Create the barrier set for the entire reserved region.
1671 G1CardTable* ct = new G1CardTable(reserved_region());
1672 ct->initialize();
1673 G1BarrierSet* bs = new G1BarrierSet(ct);
1674 bs->initialize();
1675 assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
1676 BarrierSet::set_barrier_set(bs);
1677 _card_table = ct;
1678
// Wire up the SATB (snapshot-at-the-beginning) mark queue set with its
// buffer allocator and processing/enqueueing thresholds. Note: unlike
// some versions of this code, no monitor argument is passed here.
1679 G1BarrierSet::satb_mark_queue_set().initialize(this,
1680 &bs->satb_mark_queue_buffer_allocator(),
1681 G1SATBProcessCompletedThreshold,
1682 G1SATBBufferEnqueueingThresholdPercent);
1683
1684 // process_completed_buffers_threshold and max_completed_buffers are updated
1685 // later, based on the concurrent refinement object.
1686 G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1687 &bs->dirty_card_queue_buffer_allocator(),
1688 true); // init_free_ids
1689
// This heap's own dirty card queue set shares the same monitor and
// buffer allocator as the global barrier-set queue set above.
1690 dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1691 &bs->dirty_card_queue_buffer_allocator());
1692
1693 // Create the hot card cache.
1694 _hot_card_cache = new G1HotCardCache(this);
1695
1696 // Carve out the G1 part of the heap.
1697 ReservedSpace g1_rs = heap_rs.first_part(reserved_byte_size);
// page_size reflects the actual page size backing the reservation, which
// may differ from the requested one (statement continues past this chunk).
1698 size_t page_size = actual_reserved_page_size(heap_rs);
1699 G1RegionToSpaceMapper* heap_storage =
|