8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP
26 #define SHARE_VM_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP
27
28 #include "gc/parallel/generationSizer.hpp"
29 #include "gc/parallel/objectStartArray.hpp"
30 #include "gc/parallel/psGCAdaptivePolicyCounters.hpp"
31 #include "gc/parallel/psOldGen.hpp"
32 #include "gc/parallel/psYoungGen.hpp"
33 #include "gc/shared/collectedHeap.hpp"
34 #include "gc/shared/collectorPolicy.hpp"
35 #include "gc/shared/gcPolicyCounters.hpp"
36 #include "gc/shared/gcWhen.hpp"
37 #include "gc/shared/strongRootsScope.hpp"
38 #include "memory/metaspace.hpp"
39 #include "utilities/ostream.hpp"
40
41 class AdjoiningGenerations;
42 class GCHeapSummary;
43 class GCTaskManager;
44 class PSAdaptiveSizePolicy;
45 class PSHeapSummary;
46
// ParallelScavengeHeap -- the CollectedHeap implementation used by the
// parallel (throughput) collector: one PSYoungGen plus one PSOldGen.
// NOTE(review): this is a partial view of the class; a run of the original
// file's lines (53-170, per the embedded numbering) is missing between the
// static fields and the member declarations below.
47 class ParallelScavengeHeap : public CollectedHeap {
48 friend class VMStructs;
49 private:
  // Static fields: there is a single ParallelScavengeHeap instance per VM,
  // so a single young/old generation pair is shared process-wide.
50 static PSYoungGen* _young_gen;
51 static PSOldGen* _old_gen;
52
  // Run a scavenge (young-generation collection).
171 inline void invoke_scavenge();
172
173 // Perform a full collection
174 virtual void do_full_collection(bool clear_all_soft_refs);
175
  // Inline contiguous allocation requires a single contiguous eden, which is
  // not available when NUMA-aware allocation is enabled (see top_addr below).
176 bool supports_inline_contig_alloc() const { return !UseNUMA; }
177
  // Addresses of eden's top/end for compiled-code inline allocation;
  // (HeapWord**)-1 serves as the "unavailable" sentinel under UseNUMA.
178 HeapWord* volatile* top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord* volatile*)-1; }
179 HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
180
  // TLAB maintenance: make the heap parsable (optionally retiring TLABs),
  // gather per-TLAB statistics, and resize all TLABs, respectively.
181 void ensure_parsability(bool retire_tlabs);
182 void accumulate_statistics_all_tlabs();
183 void resize_all_tlabs();
184
185 bool supports_tlab_allocation() const { return true; }
186
187 size_t tlab_capacity(Thread* thr) const;
188 size_t tlab_used(Thread* thr) const;
189 size_t unsafe_max_tlab_alloc(Thread* thr) const;
190
191 // Can a compiler initialize a new object without store barriers?
192 // This permission only extends from the creation of a new object
193 // via a TLAB up to the first subsequent safepoint.
194 virtual bool can_elide_tlab_store_barriers() const {
195 return true;
196 }
197
  // Card marks are never required to trail the store for this collector.
198 virtual bool card_mark_must_follow_store() const {
199 return false;
200 }
201
202 // Return true if we don't need a store barrier for
203 // initializing stores to an object at this address.
204 virtual bool can_elide_initializing_store_barrier(oop new_obj);
205
  // Object iteration; the "safe" variant simply delegates to object_iterate.
206 void object_iterate(ObjectClosure* cl);
207 void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
208
  // Block queries (start, size, is-object) used by heap inspection.
209 HeapWord* block_start(const void* addr) const;
210 size_t block_size(const HeapWord* addr) const;
211 bool block_is_obj(const HeapWord* addr) const;
212
213 jlong millis_since_last_gc();
214
  // Verification, summary, and printing support.
215 void prepare_for_verify();
216 PSHeapSummary create_ps_heap_summary();
217 virtual void print_on(outputStream* st) const;
218 virtual void print_on_error(outputStream* st) const;
219 virtual void print_gc_threads_on(outputStream* st) const;
220 virtual void gc_threads_do(ThreadClosure* tc) const;
221 virtual void print_tracing_info() const;
222
223 void verify(VerifyOption option /* ignored */);
224
225 // Resize the young generation. The reserved space for the
226 // generation may be expanded in preparation for the resize.
227 void resize_young_gen(size_t eden_size, size_t survivor_size);
228
229 // Resize the old generation. The reserved space for the
230 // generation may be expanded in preparation for the resize.
231 void resize_old_gen(size_t desired_free_space);
232
233 // Save the tops of the spaces in all generations
  // (PRODUCT_RETURN: expands to an empty body in product builds, so these
  // two are debug-only diagnostics -- see HotSpot's macros.hpp).
234 void record_gen_tops_before_GC() PRODUCT_RETURN;
235
236 // Mangle the unused parts of all spaces in the heap
237 void gen_mangle_unused_area() PRODUCT_RETURN;
238
239 // Call these in sequential code around the processing of strong roots.
240 class ParStrongRootsScope : public MarkScope {
241 public:
242 ParStrongRootsScope();
243 ~ParStrongRootsScope();
244 };
245 };
246
247 // Simple class for storing info about the heap at the start of GC, to be used
248 // after GC for comparison/printing.
249 class PreGCValues {
250 public:
251 PreGCValues(ParallelScavengeHeap* heap) :
252 _heap_used(heap->used()),
253 _young_gen_used(heap->young_gen()->used_in_bytes()),
254 _old_gen_used(heap->old_gen()->used_in_bytes()),
255 _metadata_used(MetaspaceAux::used_bytes()) { };
256
257 size_t heap_used() const { return _heap_used; }
|
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP
26 #define SHARE_VM_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP
27
28 #include "gc/parallel/psCardTable.hpp"
29 #include "gc/parallel/generationSizer.hpp"
30 #include "gc/parallel/objectStartArray.hpp"
31 #include "gc/parallel/psGCAdaptivePolicyCounters.hpp"
32 #include "gc/parallel/psOldGen.hpp"
33 #include "gc/parallel/psYoungGen.hpp"
34 #include "gc/shared/cardTableModRefBS.hpp"
35 #include "gc/shared/collectedHeap.hpp"
36 #include "gc/shared/collectorPolicy.hpp"
37 #include "gc/shared/gcPolicyCounters.hpp"
38 #include "gc/shared/gcWhen.hpp"
39 #include "gc/shared/strongRootsScope.hpp"
40 #include "memory/metaspace.hpp"
41 #include "utilities/ostream.hpp"
42
43 class AdjoiningGenerations;
44 class GCHeapSummary;
45 class GCTaskManager;
46 class PSAdaptiveSizePolicy;
47 class PSHeapSummary;
48
// ParallelScavengeHeap -- the CollectedHeap implementation used by the
// parallel (throughput) collector: one PSYoungGen plus one PSOldGen.
// NOTE(review): this is the second (later-revision) copy of the header in
// this file; it is also a partial view -- the original file's lines 55-172
// (per the embedded numbering) are missing between the fields and the
// declarations below.
49 class ParallelScavengeHeap : public CollectedHeap {
50 friend class VMStructs;
51 private:
  // Static fields: a single ParallelScavengeHeap instance per VM, so one
  // young/old generation pair is shared process-wide.
52 static PSYoungGen* _young_gen;
53 static PSOldGen* _old_gen;
54
  // Run a scavenge (young-generation collection).
173 inline void invoke_scavenge();
174
175 // Perform a full collection
176 virtual void do_full_collection(bool clear_all_soft_refs);
177
  // Inline contiguous allocation requires a single contiguous eden, which is
  // not available when NUMA-aware allocation is enabled (see top_addr below).
178 bool supports_inline_contig_alloc() const { return !UseNUMA; }
179
  // Addresses of eden's top/end for compiled-code inline allocation;
  // (HeapWord**)-1 serves as the "unavailable" sentinel under UseNUMA.
180 HeapWord* volatile* top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord* volatile*)-1; }
181 HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
182
  // TLAB maintenance: make the heap parsable (optionally retiring TLABs),
  // gather per-TLAB statistics, and resize all TLABs, respectively.
183 void ensure_parsability(bool retire_tlabs);
184 void accumulate_statistics_all_tlabs();
185 void resize_all_tlabs();
186
187 bool supports_tlab_allocation() const { return true; }
188
189 size_t tlab_capacity(Thread* thr) const;
190 size_t tlab_used(Thread* thr) const;
191 size_t unsafe_max_tlab_alloc(Thread* thr) const;
192
  // Object iteration; the "safe" variant simply delegates to object_iterate.
193 void object_iterate(ObjectClosure* cl);
194 void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
195
  // Block queries (start, size, is-object) used by heap inspection.
196 HeapWord* block_start(const void* addr) const;
197 size_t block_size(const HeapWord* addr) const;
198 bool block_is_obj(const HeapWord* addr) const;
199
200 jlong millis_since_last_gc();
201
  // Verification, summary, and printing support.
202 void prepare_for_verify();
203 PSHeapSummary create_ps_heap_summary();
204 virtual void print_on(outputStream* st) const;
205 virtual void print_on_error(outputStream* st) const;
206 virtual void print_gc_threads_on(outputStream* st) const;
207 virtual void gc_threads_do(ThreadClosure* tc) const;
208 virtual void print_tracing_info() const;
209
210 void verify(VerifyOption option /* ignored */);
211
212 // Resize the young generation. The reserved space for the
213 // generation may be expanded in preparation for the resize.
214 void resize_young_gen(size_t eden_size, size_t survivor_size);
215
216 // Resize the old generation. The reserved space for the
217 // generation may be expanded in preparation for the resize.
218 void resize_old_gen(size_t desired_free_space);
219
220 // Save the tops of the spaces in all generations
  // (PRODUCT_RETURN: expands to an empty body in product builds, so these
  // two are debug-only diagnostics -- see HotSpot's macros.hpp).
221 void record_gen_tops_before_GC() PRODUCT_RETURN;
222
223 // Mangle the unused parts of all spaces in the heap
224 void gen_mangle_unused_area() PRODUCT_RETURN;
225
  // The barrier set installed on CollectedHeap is known to be a
  // CardTableModRefBS for this heap; barrier_set_cast performs the downcast.
226 CardTableModRefBS* barrier_set() {
227 return barrier_set_cast<CardTableModRefBS>(CollectedHeap::barrier_set());
228 }
229
  // The barrier set's card table is downcast to PSCardTable. NOTE(review):
  // static_cast is unchecked -- presumably heap initialization always
  // installs a PSCardTable here; confirm against the setup code.
230 PSCardTable* card_table() {
231 return static_cast<PSCardTable*>(barrier_set()->card_table());
232 }
233
234 // Call these in sequential code around the processing of strong roots.
235 class ParStrongRootsScope : public MarkScope {
236 public:
237 ParStrongRootsScope();
238 ~ParStrongRootsScope();
239 };
240 };
241
242 // Simple class for storing info about the heap at the start of GC, to be used
243 // after GC for comparison/printing.
244 class PreGCValues {
245 public:
246 PreGCValues(ParallelScavengeHeap* heap) :
247 _heap_used(heap->used()),
248 _young_gen_used(heap->young_gen()->used_in_bytes()),
249 _old_gen_used(heap->old_gen()->used_in_bytes()),
250 _metadata_used(MetaspaceAux::used_bytes()) { };
251
252 size_t heap_used() const { return _heap_used; }
|