1 /*
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
79 // SharedHeap::process_strong_roots() which eventually calls into
80 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
81 // SequentialSubTasksDone. SharedHeap::process_strong_roots() also
82 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
83 //
84
85 // Local to this file.
86
87 class RefineCardTableEntryClosure: public CardTableEntryClosure {
88 SuspendibleThreadSet* _sts;
89 G1RemSet* _g1rs;
90 ConcurrentG1Refine* _cg1r;
91 bool _concurrent;
92 public:
93 RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
94 G1RemSet* g1rs,
95 ConcurrentG1Refine* cg1r) :
96 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
97 {}
98 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
99 bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
100 // This path is executed by the concurrent refine or mutator threads,
101 // concurrently, and so we do not care if card_ptr contains references
102 // that point into the collection set.
103 assert(!oops_into_cset, "should be");
104
105 if (_concurrent && _sts->should_yield()) {
106 // Caller will actually yield.
107 return false;
108 }
109 // Otherwise, we finished successfully; return true.
110 return true;
111 }
112 void set_concurrent(bool b) { _concurrent = b; }
113 };
114
115
116 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
117 int _calls;
118 G1CollectedHeap* _g1h;
119 CardTableModRefBS* _ctbs;
1417
1418 reset_gc_time_stamp();
1419 // Since everything potentially moved, we will clear all remembered
1420 // sets, and clear all cards. Later we will rebuild remembered
1421 // sets. We will also reset the GC time stamps of the regions.
1422 clear_rsets_post_compaction();
1423 check_gc_time_stamps();
1424
1425 // Resize the heap if necessary.
1426 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1427
1428 if (_hr_printer.is_active()) {
1429 // We should do this after we potentially resize the heap so
1430 // that all the COMMIT / UNCOMMIT events are generated before
1431 // the end GC event.
1432
1433 print_hrs_post_compaction();
1434 _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1435 }
1436
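// The hot card cache and the card hotness counts are stale after the
// compaction; drop them before rebuilding the remembered sets.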
1437 if (_cg1r->use_cache()) {
1438 _cg1r->clear_and_record_card_counts();
1439 _cg1r->clear_hot_cache();
1440 }
1441
1442 // Rebuild remembered sets of all regions.
1443 if (G1CollectedHeap::use_parallel_gc_threads()) {
1444 uint n_workers =
1445 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1446 workers()->active_workers(),
1447 Threads::number_of_non_daemon_threads());
1448 assert(UseDynamicNumberOfGCThreads ||
1449 n_workers == workers()->total_workers(),
1450 "If not dynamic should be using all the workers");
1451 workers()->set_active_workers(n_workers);
1452 // Set parallel threads in the heap (_n_par_threads) only
1453 // before a parallel phase and always reset it to 0 after
1454 // the phase so that the number of parallel threads does
1455 // not get carried forward to a serial phase where there
1456 // may be code that is "possibly_parallel".
1457 set_par_threads(n_workers);
1458
1459 ParRebuildRSTask rebuild_rs_task(this);
1726 if (expand(expand_bytes)) {
1727 _hrs.verify_optional();
1728 verify_region_sets_optional();
1729 return attempt_allocation_at_safepoint(word_size,
1730 false /* expect_null_mutator_alloc_region */);
1731 }
1732 return NULL;
1733 }
1734
1735 void G1CollectedHeap::update_committed_space(HeapWord* old_end,
1736 HeapWord* new_end) {
1737 assert(old_end != new_end, "don't call this otherwise");
1738 assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
1739
1740 // Update the committed mem region.
1741 _g1_committed.set_end(new_end);
1742 // Tell the card table about the update.
1743 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
1744 // Tell the BOT about the update.
1745 _bot_shared->resize(_g1_committed.word_size());
1746 }
1747
1748 bool G1CollectedHeap::expand(size_t expand_bytes) {
1749 size_t old_mem_size = _g1_storage.committed_size();
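// Round the requested expansion up to a whole number of pages and
// then to a whole number of heap regions, since the committed space
// only grows in HeapRegion::GrainBytes increments.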
1750 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1751 aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1752 HeapRegion::GrainBytes);
1753 ergo_verbose2(ErgoHeapSizing,
1754 "expand the heap",
1755 ergo_format_byte("requested expansion amount")
1756 ergo_format_byte("attempted expansion amount"),
1757 expand_bytes, aligned_expand_bytes);
1758
1759 // First commit the memory.
1760 HeapWord* old_end = (HeapWord*) _g1_storage.high();
1761 bool successful = _g1_storage.expand_by(aligned_expand_bytes);
1762 if (successful) {
1763 // Then propagate this update to the necessary data structures.
1764 HeapWord* new_end = (HeapWord*) _g1_storage.high();
1765 update_committed_space(old_end, new_end);
1967 MutexLocker x(Heap_lock);
1968
1969 // We have to initialize the printer before committing the heap, as
1970 // it will be used then.
1971 _hr_printer.set_active(G1PrintHeapRegions);
1972
1973 // While there are no constraints in the GC code that HeapWordSize
1974 // be any particular value, there are multiple other areas in the
1975 // system which assume that HeapWordSize == wordSize (e.g. oop->object_size in some
1976 // cases incorrectly returns the size in wordSize units rather than
1977 // HeapWordSize).
1978 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1979
1980 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1981 size_t max_byte_size = collector_policy()->max_heap_byte_size();
1982
1983 // Ensure that the sizes are properly aligned.
1984 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1985 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1986
1987 _cg1r = new ConcurrentG1Refine();
1988
1989 // Reserve the maximum.
1990
1991 // When compressed oops are enabled, the preferred heap base
1992 // is calculated by subtracting the requested size from the
1993 // 32Gb boundary and using the result as the base address for
1994 // heap reservation. If the requested size is not aligned to
1995 // HeapRegion::GrainBytes (i.e. the alignment that is passed
1996 // into the ReservedHeapSpace constructor) then the actual
1997 // base of the reserved heap may end up differing from the
1998 // address that was requested (i.e. the preferred heap base).
1999 // If this happens then we could end up using a non-optimal
2000 // compressed oops mode.
2001
2002 // max_byte_size is aligned to the size of a heap region (this was
2003 // checked above).
2004 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
2005
2006 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
2007 HeapRegion::GrainBytes);
2028 // Also create a G1 rem set.
2029 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
2030 _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
2031 } else {
2032 vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
2033 return JNI_ENOMEM;
2034 }
2035
2036 // Carve out the G1 part of the heap.
2037
2038 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
2039 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
2040 g1_rs.size()/HeapWordSize);
2041
2042 _g1_storage.initialize(g1_rs, 0);
2043 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
2044 _hrs.initialize((HeapWord*) _g1_reserved.start(),
2045 (HeapWord*) _g1_reserved.end(),
2046 _expansion_regions);
2047
2048 // 6843694 - ensure that the maximum region index can fit
2049 // in the remembered set structures.
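// For example, with a 16-bit RegionIdx_t the limit works out to
// (1 << 15) - 1 = 32767 regions.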
2050 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2051 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
2052
2053 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2054 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2055 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2056 "too many cards per region");
2057
2058 HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
2059
2060 _bot_shared = new G1BlockOffsetSharedArray(_reserved,
2061 heap_word_size(init_byte_size));
2062
2063 _g1h = this;
2064
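// One entry per heap region: a fast way to test whether a given
// region is in the current collection set.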
2065 _in_cset_fast_test_length = max_regions();
2066 _in_cset_fast_test_base =
2067 NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
2120 DirtyCardQ_FL_lock,
2121 -1, // never trigger processing
2122 -1, // no limit on length
2123 Shared_DirtyCardQ_lock,
2124 &JavaThread::dirty_card_queue_set());
2125 }
2126
2127 // Initialize the card queue set used to hold cards containing
2128 // references into the collection set.
2129 _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
2130 DirtyCardQ_FL_lock,
2131 -1, // never trigger processing
2132 -1, // no limit on length
2133 Shared_DirtyCardQ_lock,
2134 &JavaThread::dirty_card_queue_set());
2135
2136 // In case we're keeping closure specialization stats, initialize those
2137 // counts and that mechanism.
2138 SpecializationStats::clear();
2139
2140 // Do later initialization work for concurrent refinement.
2141 _cg1r->init();
2142
2143 // Here we allocate the dummy full region that is required by the
2144 // G1AllocRegion class. If we don't pass an address in the reserved
2145 // space here, lots of asserts fire.
2146
2147 HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
2148 _g1_reserved.start());
2149 // We'll re-use the same region whether or not the alloc region
2150 // requires BOT updates. If it doesn't, a non-young region would
2151 // complain that it cannot support allocations without BOT
2152 // updates, so we tag the dummy region as young to avoid that.
2153 dummy_region->set_young();
2154 // Make sure it's full.
2155 dummy_region->set_top(dummy_region->end());
2156 G1AllocRegion::setup(this, dummy_region);
2157
2158 init_mutator_alloc_region();
2159
2160 // Create the monitoring and management support now, so that the
2161 // heap values it reads have been properly initialized.
2162 _g1mm = new G1MonitoringSupport(this);
2281 _failures = true;
2282 }
2283 return false;
2284 }
2285
2286 bool failures() { return _failures; }
2287 };
2288
2289 void G1CollectedHeap::check_gc_time_stamps() {
2290 CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2291 heap_region_iterate(&cl);
2292 guarantee(!cl.failures(), "all GC time stamps should have been reset");
2293 }
2294 #endif // PRODUCT
2295
2296 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2297 DirtyCardQueue* into_cset_dcq,
2298 bool concurrent,
2299 int worker_i) {
2300 // Clean cards in the hot card cache
2301 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
2302
2303 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2304 int n_completed_buffers = 0;
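// Apply the closure to every completed buffer of logged dirty cards;
// the number of buffers processed feeds the update-RS phase times
// recorded below.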
2305 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2306 n_completed_buffers++;
2307 }
2308 g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
2309 dcqs.clear_n_completed_buffers();
2310 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2311 }
2312
2313
2314 // Computes the sum of the storage used by the various regions.
2315
2316 size_t G1CollectedHeap::used() const {
2317 assert(Heap_lock->owner() != NULL,
2318 "Should be owned on this thread's behalf.");
2319 size_t result = _summary_bytes_used;
2320 // Read only once in case it is set to NULL concurrently
2321 HeapRegion* hr = _mutator_alloc_region.get();
5578 rp->verify_no_references_recorded();
5579 assert(!rp->discovery_enabled(), "should have been disabled");
5580
5581 // FIXME
5582 // CM's reference processing also cleans up the string and symbol tables.
5583 // Should we do that here also? We could, but it is a serial operation
5584 // and could significantly increase the pause time.
5585
5586 double ref_enq_time = os::elapsedTime() - ref_enq_start;
5587 g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5588 }
5589
5590 void G1CollectedHeap::evacuate_collection_set() {
5591 _expand_heap_after_alloc_failure = true;
5592 set_evacuation_failed(false);
5593
5594 // Should G1EvacuationFailureALot be in effect for this GC?
5595 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5596
5597 g1_rem_set()->prepare_for_oops_into_collection_set_do();
5598 concurrent_g1_refine()->set_use_cache(false);
5599 concurrent_g1_refine()->clear_hot_cache_claimed_index();
5600
5601 uint n_workers;
5602 if (G1CollectedHeap::use_parallel_gc_threads()) {
5603 n_workers =
5604 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5605 workers()->active_workers(),
5606 Threads::number_of_non_daemon_threads());
5607 assert(UseDynamicNumberOfGCThreads ||
5608 n_workers == workers()->total_workers(),
5609 "If not dynamic should be using all the workers");
5610 workers()->set_active_workers(n_workers);
5611 set_par_threads(n_workers);
5612 } else {
5613 assert(n_par_threads() == 0,
5614 "Should be the original non-parallel value");
5615 n_workers = 1;
5616 }
5617
5618 G1ParTask g1_par_task(this, _task_queues);
5619
5661 // Process any discovered reference objects - we have
5662 // to do this _before_ we retire the GC alloc regions
5663 // as we may have to copy some 'reachable' referent
5664 // objects (and their reachable sub-graphs) that were
5665 // not copied during the pause.
5666 process_discovered_references(n_workers);
5667
5668 // Weak root processing.
5669 // Note: when JSR 292 is enabled and code blobs can contain
5670 // non-perm oops then we will need to process the code blobs
5671 // here too.
5672 {
5673 G1STWIsAliveClosure is_alive(this);
5674 G1KeepAliveClosure keep_alive(this);
5675 JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5676 }
5677
5678 release_gc_alloc_regions(n_workers);
5679 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5680
5681 concurrent_g1_refine()->clear_hot_cache();
5682 concurrent_g1_refine()->set_use_cache(true);
5683
5684 finalize_for_evac_failure();
5685
5686 if (evacuation_failed()) {
5687 remove_self_forwarding_pointers();
5688
5689 // Reset the G1EvacuationFailureALot counters and flags
5690 // Note: the values are reset only when an actual
5691 // evacuation failure occurs.
5692 NOT_PRODUCT(reset_evacuation_should_fail();)
5693 }
5694
5695 // Enqueue any references remaining on the STW
5696 // reference processor's discovered lists. We need to do
5697 // this after the card table is cleaned (and verified) as
5698 // the act of enqueuing entries on to the pending list
5699 // will log these updates (and dirty their associated
5700 // cards). We need these updates logged to update any
5701 // RSets.
5702 enqueue_discovered_references(n_workers);
5724 if (hr->isHumongous()) {
5725 assert(hr->startsHumongous(), "we should only see starts humongous");
5726 free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
5727 } else {
5728 _old_set.remove_with_proxy(hr, old_proxy_set);
5729 free_region(hr, pre_used, free_list, par);
5730 }
5731 } else {
5732 hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
5733 }
5734 }
5735
5736 void G1CollectedHeap::free_region(HeapRegion* hr,
5737 size_t* pre_used,
5738 FreeRegionList* free_list,
5739 bool par) {
5740 assert(!hr->isHumongous(), "this is only for non-humongous regions");
5741 assert(!hr->is_empty(), "the region should not be empty");
5742 assert(free_list != NULL, "pre-condition");
5743
5744 *pre_used += hr->used();
5745 hr->hr_clear(par, true /* clear_space */);
5746 free_list->add_as_head(hr);
5747 }
5748
5749 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5750 size_t* pre_used,
5751 FreeRegionList* free_list,
5752 HumongousRegionSet* humongous_proxy_set,
5753 bool par) {
5754 assert(hr->startsHumongous(), "this is only for starts humongous regions");
5755 assert(free_list != NULL, "pre-condition");
5756 assert(humongous_proxy_set != NULL, "pre-condition");
5757
5758 size_t hr_used = hr->used();
5759 size_t hr_capacity = hr->capacity();
5760 size_t hr_pre_used = 0;
5761 _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
5762 // We need to read this before we make the region non-humongous,
5763 // otherwise the information will be gone.
|
1 /*
2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
79 // SharedHeap::process_strong_roots() which eventually calls into
80 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
81 // SequentialSubTasksDone. SharedHeap::process_strong_roots() also
82 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
83 //
84
85 // Local to this file.
86
87 class RefineCardTableEntryClosure: public CardTableEntryClosure {
88 SuspendibleThreadSet* _sts;
89 G1RemSet* _g1rs;
90 ConcurrentG1Refine* _cg1r;
91 bool _concurrent;
92 public:
93 RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
94 G1RemSet* g1rs,
95 ConcurrentG1Refine* cg1r) :
96 _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
97 {}
98 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
99 bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
100 // This path is executed by the concurrent refine or mutator threads,
101 // concurrently, and so we do not care if card_ptr contains references
102 // that point into the collection set.
103 assert(!oops_into_cset, "should be");
104
105 if (_concurrent && _sts->should_yield()) {
106 // Caller will actually yield.
107 return false;
108 }
109 // Otherwise, we finished successfully; return true.
110 return true;
111 }
112 void set_concurrent(bool b) { _concurrent = b; }
113 };
114
115
116 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
117 int _calls;
118 G1CollectedHeap* _g1h;
119 CardTableModRefBS* _ctbs;
1417
1418 reset_gc_time_stamp();
1419 // Since everything potentially moved, we will clear all remembered
1420 // sets, and clear all cards. Later we will rebuild remembered
1421 // sets. We will also reset the GC time stamps of the regions.
1422 clear_rsets_post_compaction();
1423 check_gc_time_stamps();
1424
1425 // Resize the heap if necessary.
1426 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1427
1428 if (_hr_printer.is_active()) {
1429 // We should do this after we potentially resize the heap so
1430 // that all the COMMIT / UNCOMMIT events are generated before
1431 // the end GC event.
1432
1433 print_hrs_post_compaction();
1434 _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1435 }
1436
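// The hot card cache and the card hotness counts are stale after the
// compaction; drop them before rebuilding the remembered sets.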
1437 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
1438 if (hot_card_cache->use_cache()) {
1439 hot_card_cache->reset_card_counts();
1440 hot_card_cache->reset_hot_cache();
1441 }
1442
1443 // Rebuild remembered sets of all regions.
1444 if (G1CollectedHeap::use_parallel_gc_threads()) {
1445 uint n_workers =
1446 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1447 workers()->active_workers(),
1448 Threads::number_of_non_daemon_threads());
1449 assert(UseDynamicNumberOfGCThreads ||
1450 n_workers == workers()->total_workers(),
1451 "If not dynamic should be using all the workers");
1452 workers()->set_active_workers(n_workers);
1453 // Set parallel threads in the heap (_n_par_threads) only
1454 // before a parallel phase and always reset it to 0 after
1455 // the phase so that the number of parallel threads does
1456 // not get carried forward to a serial phase where there
1457 // may be code that is "possibly_parallel".
1458 set_par_threads(n_workers);
1459
1460 ParRebuildRSTask rebuild_rs_task(this);
1727 if (expand(expand_bytes)) {
1728 _hrs.verify_optional();
1729 verify_region_sets_optional();
1730 return attempt_allocation_at_safepoint(word_size,
1731 false /* expect_null_mutator_alloc_region */);
1732 }
1733 return NULL;
1734 }
1735
1736 void G1CollectedHeap::update_committed_space(HeapWord* old_end,
1737 HeapWord* new_end) {
1738 assert(old_end != new_end, "don't call this otherwise");
1739 assert((HeapWord*) _g1_storage.high() == new_end, "invariant");
1740
1741 // Update the committed mem region.
1742 _g1_committed.set_end(new_end);
1743 // Tell the card table about the update.
1744 Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
1745 // Tell the BOT about the update.
1746 _bot_shared->resize(_g1_committed.word_size());
1747 // Tell the hot card cache about the update
1748 _cg1r->hot_card_cache()->resize_card_counts(capacity());
1749 }
1750
1751 bool G1CollectedHeap::expand(size_t expand_bytes) {
1752 size_t old_mem_size = _g1_storage.committed_size();
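// Round the requested expansion up to a whole number of pages and
// then to a whole number of heap regions, since the committed space
// only grows in HeapRegion::GrainBytes increments.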
1753 size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1754 aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1755 HeapRegion::GrainBytes);
1756 ergo_verbose2(ErgoHeapSizing,
1757 "expand the heap",
1758 ergo_format_byte("requested expansion amount")
1759 ergo_format_byte("attempted expansion amount"),
1760 expand_bytes, aligned_expand_bytes);
1761
1762 // First commit the memory.
1763 HeapWord* old_end = (HeapWord*) _g1_storage.high();
1764 bool successful = _g1_storage.expand_by(aligned_expand_bytes);
1765 if (successful) {
1766 // Then propagate this update to the necessary data structures.
1767 HeapWord* new_end = (HeapWord*) _g1_storage.high();
1768 update_committed_space(old_end, new_end);
1970 MutexLocker x(Heap_lock);
1971
1972 // We have to initialize the printer before committing the heap, as
1973 // it will be used then.
1974 _hr_printer.set_active(G1PrintHeapRegions);
1975
1976 // While there are no constraints in the GC code that HeapWordSize
1977 // be any particular value, there are multiple other areas in the
1979 // system which assume that HeapWordSize == wordSize (e.g. oop->object_size in some
1979 // cases incorrectly returns the size in wordSize units rather than
1980 // HeapWordSize).
1981 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1982
1983 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1984 size_t max_byte_size = collector_policy()->max_heap_byte_size();
1985
1986 // Ensure that the sizes are properly aligned.
1987 Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1988 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1989
1990 _cg1r = new ConcurrentG1Refine(this);
1991
1992 // Reserve the maximum.
1993
1994 // When compressed oops are enabled, the preferred heap base
1995 // is calculated by subtracting the requested size from the
1996 // 32Gb boundary and using the result as the base address for
1997 // heap reservation. If the requested size is not aligned to
1998 // HeapRegion::GrainBytes (i.e. the alignment that is passed
1999 // into the ReservedHeapSpace constructor) then the actual
2000 // base of the reserved heap may end up differing from the
2001 // address that was requested (i.e. the preferred heap base).
2002 // If this happens then we could end up using a non-optimal
2003 // compressed oops mode.
2004
2005 // max_byte_size is aligned to the size of a heap region (this was
2006 // checked above).
2007 Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
2008
2009 ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
2010 HeapRegion::GrainBytes);
2031 // Also create a G1 rem set.
2032 if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
2033 _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
2034 } else {
2035 vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
2036 return JNI_ENOMEM;
2037 }
2038
2039 // Carve out the G1 part of the heap.
2040
2041 ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
2042 _g1_reserved = MemRegion((HeapWord*)g1_rs.base(),
2043 g1_rs.size()/HeapWordSize);
2044
2045 _g1_storage.initialize(g1_rs, 0);
2046 _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
2047 _hrs.initialize((HeapWord*) _g1_reserved.start(),
2048 (HeapWord*) _g1_reserved.end(),
2049 _expansion_regions);
2050
2051 // Do later initialization work for concurrent refinement.
2052 _cg1r->init();
2053
2054 // 6843694 - ensure that the maximum region index can fit
2055 // in the remembered set structures.
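// For example, with a 16-bit RegionIdx_t the limit works out to
// (1 << 15) - 1 = 32767 regions.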
2056 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2057 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
2058
2059 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2060 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2061 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2062 "too many cards per region");
2063
2064 HeapRegionSet::set_unrealistically_long_length(max_regions() + 1);
2065
2066 _bot_shared = new G1BlockOffsetSharedArray(_reserved,
2067 heap_word_size(init_byte_size));
2068
2069 _g1h = this;
2070
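// One entry per heap region: a fast way to test whether a given
// region is in the current collection set.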
2071 _in_cset_fast_test_length = max_regions();
2072 _in_cset_fast_test_base =
2073 NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
2126 DirtyCardQ_FL_lock,
2127 -1, // never trigger processing
2128 -1, // no limit on length
2129 Shared_DirtyCardQ_lock,
2130 &JavaThread::dirty_card_queue_set());
2131 }
2132
2133 // Initialize the card queue set used to hold cards containing
2134 // references into the collection set.
2135 _into_cset_dirty_card_queue_set.initialize(DirtyCardQ_CBL_mon,
2136 DirtyCardQ_FL_lock,
2137 -1, // never trigger processing
2138 -1, // no limit on length
2139 Shared_DirtyCardQ_lock,
2140 &JavaThread::dirty_card_queue_set());
2141
2142 // In case we're keeping closure specialization stats, initialize those
2143 // counts and that mechanism.
2144 SpecializationStats::clear();
2145
2146 // Here we allocate the dummy full region that is required by the
2147 // G1AllocRegion class. If we don't pass an address in the reserved
2148 // space here, lots of asserts fire.
2149
2150 HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */,
2151 _g1_reserved.start());
2152 // We'll re-use the same region whether or not the alloc region
2153 // requires BOT updates. If it doesn't, a non-young region would
2154 // complain that it cannot support allocations without BOT
2155 // updates, so we tag the dummy region as young to avoid that.
2156 dummy_region->set_young();
2157 // Make sure it's full.
2158 dummy_region->set_top(dummy_region->end());
2159 G1AllocRegion::setup(this, dummy_region);
2160
2161 init_mutator_alloc_region();
2162
2163 // Create the monitoring and management support now, so that the
2164 // heap values it reads have been properly initialized.
2165 _g1mm = new G1MonitoringSupport(this);
2284 _failures = true;
2285 }
2286 return false;
2287 }
2288
2289 bool failures() { return _failures; }
2290 };
2291
2292 void G1CollectedHeap::check_gc_time_stamps() {
2293 CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2294 heap_region_iterate(&cl);
2295 guarantee(!cl.failures(), "all GC time stamps should have been reset");
2296 }
2297 #endif // PRODUCT
2298
2299 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2300 DirtyCardQueue* into_cset_dcq,
2301 bool concurrent,
2302 int worker_i) {
2303 // Clean cards in the hot card cache
2304 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
2305 hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
2306
2307 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2308 int n_completed_buffers = 0;
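// Apply the closure to every completed buffer of logged dirty cards;
// the number of buffers processed feeds the update-RS phase times
// recorded below.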
2309 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2310 n_completed_buffers++;
2311 }
2312 g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
2313 dcqs.clear_n_completed_buffers();
2314 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2315 }
2316
2317
2318 // Computes the sum of the storage used by the various regions.
2319
2320 size_t G1CollectedHeap::used() const {
2321 assert(Heap_lock->owner() != NULL,
2322 "Should be owned on this thread's behalf.");
2323 size_t result = _summary_bytes_used;
2324 // Read only once in case it is set to NULL concurrently
2325 HeapRegion* hr = _mutator_alloc_region.get();
5582 rp->verify_no_references_recorded();
5583 assert(!rp->discovery_enabled(), "should have been disabled");
5584
5585 // FIXME
5586 // CM's reference processing also cleans up the string and symbol tables.
5587 // Should we do that here also? We could, but it is a serial operation
5588 // and could significantly increase the pause time.
5589
5590 double ref_enq_time = os::elapsedTime() - ref_enq_start;
5591 g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5592 }
5593
5594 void G1CollectedHeap::evacuate_collection_set() {
5595 _expand_heap_after_alloc_failure = true;
5596 set_evacuation_failed(false);
5597
5598 // Should G1EvacuationFailureALot be in effect for this GC?
5599 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5600
5601 g1_rem_set()->prepare_for_oops_into_collection_set_do();
5602
5603 // Disable the hot card cache.
5604 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5605 hot_card_cache->reset_hot_cache_claimed_index();
5606 hot_card_cache->set_use_cache(false);
5607
5608 uint n_workers;
5609 if (G1CollectedHeap::use_parallel_gc_threads()) {
5610 n_workers =
5611 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5612 workers()->active_workers(),
5613 Threads::number_of_non_daemon_threads());
5614 assert(UseDynamicNumberOfGCThreads ||
5615 n_workers == workers()->total_workers(),
5616 "If not dynamic should be using all the workers");
5617 workers()->set_active_workers(n_workers);
5618 set_par_threads(n_workers);
5619 } else {
5620 assert(n_par_threads() == 0,
5621 "Should be the original non-parallel value");
5622 n_workers = 1;
5623 }
5624
5625 G1ParTask g1_par_task(this, _task_queues);
5626
5668 // Process any discovered reference objects - we have
5669 // to do this _before_ we retire the GC alloc regions
5670 // as we may have to copy some 'reachable' referent
5671 // objects (and their reachable sub-graphs) that were
5672 // not copied during the pause.
5673 process_discovered_references(n_workers);
5674
5675 // Weak root processing.
5676 // Note: when JSR 292 is enabled and code blobs can contain
5677 // non-perm oops then we will need to process the code blobs
5678 // here too.
5679 {
5680 G1STWIsAliveClosure is_alive(this);
5681 G1KeepAliveClosure keep_alive(this);
5682 JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5683 }
5684
5685 release_gc_alloc_regions(n_workers);
5686 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5687
5688 // Reset and re-enable the hot card cache.
5689 // Note the counts for the cards in the regions in the
5690 // collection set are reset when the collection set is freed.
5691 hot_card_cache->reset_hot_cache();
5692 hot_card_cache->set_use_cache(true);
5693
5694 finalize_for_evac_failure();
5695
5696 if (evacuation_failed()) {
5697 remove_self_forwarding_pointers();
5698
5699 // Reset the G1EvacuationFailureALot counters and flags
5700 // Note: the values are reset only when an actual
5701 // evacuation failure occurs.
5702 NOT_PRODUCT(reset_evacuation_should_fail();)
5703 }
5704
5706 // Enqueue any references remaining on the STW
5706 // reference processor's discovered lists. We need to do
5707 // this after the card table is cleaned (and verified) as
5708 // the act of enqueuing entries on to the pending list
5709 // will log these updates (and dirty their associated
5710 // cards). We need these updates logged to update any
5711 // RSets.
5712 enqueue_discovered_references(n_workers);
5734 if (hr->isHumongous()) {
5735 assert(hr->startsHumongous(), "we should only see starts humongous");
5736 free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par);
5737 } else {
5738 _old_set.remove_with_proxy(hr, old_proxy_set);
5739 free_region(hr, pre_used, free_list, par);
5740 }
5741 } else {
5742 hr->rem_set()->do_cleanup_work(hrrs_cleanup_task);
5743 }
5744 }
5745
5746 void G1CollectedHeap::free_region(HeapRegion* hr,
5747 size_t* pre_used,
5748 FreeRegionList* free_list,
5749 bool par) {
5750 assert(!hr->isHumongous(), "this is only for non-humongous regions");
5751 assert(!hr->is_empty(), "the region should not be empty");
5752 assert(free_list != NULL, "pre-condition");
5753
5754 // Clear the card counts for this region.
5755 // Note: we only need to do this if the region is not young
5756 // (since we don't refine cards in young regions).
5757 if (!hr->is_young()) {
5758 _cg1r->hot_card_cache()->reset_card_counts(hr);
5759 }
5760 *pre_used += hr->used();
5761 hr->hr_clear(par, true /* clear_space */);
5762 free_list->add_as_head(hr);
5763 }
5764
5765 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5766 size_t* pre_used,
5767 FreeRegionList* free_list,
5768 HumongousRegionSet* humongous_proxy_set,
5769 bool par) {
5770 assert(hr->startsHumongous(), "this is only for starts humongous regions");
5771 assert(free_list != NULL, "pre-condition");
5772 assert(humongous_proxy_set != NULL, "pre-condition");
5773
5774 size_t hr_used = hr->used();
5775 size_t hr_capacity = hr->capacity();
5776 size_t hr_pre_used = 0;
5777 _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
5778 // We need to read this before we make the region non-humongous,
5779 // otherwise the information will be gone.
|