115 class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
116 private:
117 size_t _num_dirtied;
118 G1CollectedHeap* _g1h;
119 G1CardTable* _g1_ct;
120
121 HeapRegion* region_for_card(CardValue* card_ptr) const {
122 return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
123 }
124
125 bool will_become_free(HeapRegion* hr) const {
126 // A region will be freed by free_collection_set if the region is in the
127 // collection set and has not had an evacuation failure.
128 return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
129 }
130
131 public:
132 RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : G1CardTableEntryClosure(),
133 _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
134
135 void do_card_ptr(CardValue* card_ptr, uint worker_i) {
136 HeapRegion* hr = region_for_card(card_ptr);
137
138 // Should only dirty cards in regions that won't be freed.
139 if (!will_become_free(hr)) {
140 *card_ptr = G1CardTable::dirty_card_val();
141 _num_dirtied++;
142 }
143 }
144
145 size_t num_dirtied() const { return _num_dirtied; }
146 };
147
148
// Invalidates the from-card-cache entries for the given region range so that
// stale cached cards are not consulted after the region mapping changed.
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}
152
153 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
154 // The from card cache is not the memory that is actually committed. So we cannot
155 // take advantage of the zero_filled parameter.
1921 ParallelGCThreads, // degree of mt processing
1922 (ParallelGCThreads > 1), // mt discovery
1923 ParallelGCThreads, // degree of mt discovery
1924 true, // Reference discovery is atomic
1925 &_is_alive_closure_stw, // is alive closure
1926 true); // allow changes to number of processing threads
1927 }
1928
// Returns the soft reference clearing policy used by this heap.
SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}
1932
1933 size_t G1CollectedHeap::capacity() const {
1934 return _hrm->length() * HeapRegion::GrainBytes;
1935 }
1936
// Bytes in committed-but-unused regions, as reported by the region manager.
size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
  return _hrm->total_free_bytes();
}
1940
1941 void G1CollectedHeap::iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i) {
1942 _hot_card_cache->drain(cl, worker_i);
1943 }
1944
1945 // Computes the sum of the storage used by the various regions.
1946 size_t G1CollectedHeap::used() const {
1947 size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
1948 if (_archive_allocator != NULL) {
1949 result += _archive_allocator->used();
1950 }
1951 return result;
1952 }
1953
// Returns the cached used-bytes summary only; unlike used(), it does not add
// the bytes in the active allocation regions. NOTE(review): "unlocked"
// presumably means callers need not hold the Heap_lock — confirm at call sites.
size_t G1CollectedHeap::used_unlocked() const {
  return _summary_bytes_used;
}
1957
1958 class SumUsedClosure: public HeapRegionClosure {
1959 size_t _used;
1960 public:
1961 SumUsedClosure() : _used(0) {}
1962 bool do_heap_region(HeapRegion* r) {
|
115 class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
116 private:
117 size_t _num_dirtied;
118 G1CollectedHeap* _g1h;
119 G1CardTable* _g1_ct;
120
121 HeapRegion* region_for_card(CardValue* card_ptr) const {
122 return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
123 }
124
125 bool will_become_free(HeapRegion* hr) const {
126 // A region will be freed by free_collection_set if the region is in the
127 // collection set and has not had an evacuation failure.
128 return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
129 }
130
131 public:
132 RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : G1CardTableEntryClosure(),
133 _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
134
135 void do_card_ptr(CardValue* card_ptr, uint worker_id) {
136 HeapRegion* hr = region_for_card(card_ptr);
137
138 // Should only dirty cards in regions that won't be freed.
139 if (!will_become_free(hr)) {
140 *card_ptr = G1CardTable::dirty_card_val();
141 _num_dirtied++;
142 }
143 }
144
145 size_t num_dirtied() const { return _num_dirtied; }
146 };
147
148
// Drops from-card-cache entries for the affected region range; the cached
// cards are stale once the region mapping has changed.
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}
152
153 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
154 // The from card cache is not the memory that is actually committed. So we cannot
155 // take advantage of the zero_filled parameter.
1921 ParallelGCThreads, // degree of mt processing
1922 (ParallelGCThreads > 1), // mt discovery
1923 ParallelGCThreads, // degree of mt discovery
1924 true, // Reference discovery is atomic
1925 &_is_alive_closure_stw, // is alive closure
1926 true); // allow changes to number of processing threads
1927 }
1928
// Accessor for the heap's soft reference clearing policy.
SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}
1932
1933 size_t G1CollectedHeap::capacity() const {
1934 return _hrm->length() * HeapRegion::GrainBytes;
1935 }
1936
// Free bytes within committed regions, per the region manager's accounting.
size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
  return _hrm->total_free_bytes();
}
1940
// Drains the hot card cache, applying cl to each cached card entry;
// worker_id identifies the worker thread performing the drain.
void G1CollectedHeap::iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_id) {
  _hot_card_cache->drain(cl, worker_id);
}
1944
1945 // Computes the sum of the storage used by the various regions.
1946 size_t G1CollectedHeap::used() const {
1947 size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
1948 if (_archive_allocator != NULL) {
1949 result += _archive_allocator->used();
1950 }
1951 return result;
1952 }
1953
// Returns only the cached used-bytes summary; bytes in the active allocation
// regions are excluded (contrast with used()). NOTE(review): the "unlocked"
// suffix suggests no Heap_lock is required — verify against callers.
size_t G1CollectedHeap::used_unlocked() const {
  return _summary_bytes_used;
}
1957
1958 class SumUsedClosure: public HeapRegionClosure {
1959 size_t _used;
1960 public:
1961 SumUsedClosure() : _used(0) {}
1962 bool do_heap_region(HeapRegion* r) {
|