77 SubTasksDone* _process_strong_tasks;
78
79 // Collects the given generation.
80 void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
81 bool run_verification, bool clear_soft_refs,
82 bool restore_marks_for_biased_locking);
83
84 // In block contents verification, the number of header words to skip
85 NOT_PRODUCT(static size_t _skip_header_HeapWords;)
86
87 FlexibleWorkGang* _workers;
88
89 protected:
90 // Helper functions for allocation
91 HeapWord* attempt_allocation(size_t size,
92 bool is_tlab,
93 bool first_only);
94
95 // Helper function for two callbacks below.
96 // Considers collection of the first max_level+1 generations.
97 void do_collection(bool full,
98 bool clear_all_soft_refs,
99 size_t size,
100 bool is_tlab,
101 int max_level);
102
103 // Callback from VM_GenCollectForAllocation operation.
104 // This function does everything necessary/possible to satisfy an
105 // allocation request that failed in the youngest generation that should
106 // have handled it (including collection, expansion, etc.)
107 HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
108
109 // Callback from VM_GenCollectFull operation.
110 // Perform a full collection of the first max_level+1 generations.
111 virtual void do_full_collection(bool clear_all_soft_refs);
112 void do_full_collection(bool clear_all_soft_refs, int max_level);
113
114 // Does the "cause" of GC indicate that
115 // we absolutely __must__ clear soft refs?
116 bool must_clear_all_soft_refs();
117
118 public:
119 GenCollectedHeap(GenCollectorPolicy *policy);
120
121 FlexibleWorkGang* workers() const { return _workers; }
122
123 GCStats* gc_stats(int level) const;
124
125 // Returns JNI_OK on success
126 virtual jint initialize();
127
128 // Reserve aligned space for the heap as needed by the contained generations.
129 char* allocate(size_t alignment, ReservedSpace* heap_rs);
130
131 // Does operations required after initialization has been done.
132 void post_initialize();
133
134 // Initialize ("weak") refs processing support
135 virtual void ref_processing_init();
136
// Tag used by CollectedHeap clients to identify the concrete heap kind.
137 virtual Name kind() const {
138 return CollectedHeap::GenCollectedHeap;
139 }
140
141 Generation* young_gen() const { return _young_gen; }
142 Generation* old_gen() const { return _old_gen; }
143
144 // The generational collector policy.
145 GenCollectorPolicy* gen_policy() const { return _gen_policy; }
146
147 virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); }
148
149 // Adaptive size policy
150 virtual AdaptiveSizePolicy* size_policy() {
151 return gen_policy()->size_policy();
152 }
153
154 // Return the (conservative) maximum heap alignment
155 static size_t conservative_max_heap_alignment() {
156 return Generation::GenGrain;
157 }
158
159 size_t capacity() const;
160 size_t used() const;
161
162 // Save the "used_region" for generations level and lower.
163 void save_used_regions(int level);
164
165 size_t max_capacity() const;
166
167 HeapWord* mem_allocate(size_t size,
168 bool* gc_overhead_limit_was_exceeded);
169
170 // We may support a shared contiguous allocation area, if the youngest
171 // generation does.
172 bool supports_inline_contig_alloc() const;
173 HeapWord** top_addr() const;
174 HeapWord** end_addr() const;
175
176 // Perform a full collection of the heap; intended for use in implementing
177 // "System.gc". This implies as full a collection as the CollectedHeap
178 // supports. Caller does not hold the Heap_lock on entry.
179 void collect(GCCause::Cause cause);
180
181 // The same as above but assume that the caller holds the Heap_lock.
182 void collect_locked(GCCause::Cause cause);
183
184 // Perform a full collection of the first max_level+1 generations.
185 // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
186 void collect(GCCause::Cause cause, int max_level);
187
188 // Returns "TRUE" iff "p" points into the committed areas of the heap.
189 // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
190 // be expensive to compute in general, so, to prevent
191 // their inadvertent use in product jvm's, we restrict their use to
192 // assertion checking or verification only.
193 bool is_in(const void* p) const;
194
195 // override
// With CMS the check widens to the reserved space; elsewhere it defers to
// the precise is_in() test, which (per the note above) may be expensive and
// is restricted to assertion/verification use.
196 bool is_in_closed_subset(const void* p) const {
197 if (UseConcMarkSweepGC) {
198 return is_in_reserved(p);
199 } else {
200 return is_in(p);
201 }
202 }
203
204 // Returns true if the reference is to an object in the reserved space
205 // for the young generation.
206 // Assumes that the young gen address range is less than that of the old gen.
296
297 // Total number of full collections completed.
// May lag _total_full_collections while a stop-world or concurrent cycle
// is still in progress (see update_full_collections_completed below).
298 unsigned int total_full_collections_completed() {
299 assert(_full_collections_completed <= _total_full_collections,
300 "Can't complete more collections than were started");
301 return _full_collections_completed;
302 }
303
304 // Update above counter, as appropriate, at the end of a stop-world GC cycle
305 unsigned int update_full_collections_completed();
306 // Update above counter, as appropriate, at the end of a concurrent GC cycle
307 unsigned int update_full_collections_completed(unsigned int count);
308
309 // Update "time of last gc" for all generations to "now".
310 void update_time_of_last_gc(jlong now) {
311 _young_gen->update_time_of_last_gc(now);
312 _old_gen->update_time_of_last_gc(now);
313 }
314
315 // Update the gc statistics for each generation.
316 // "current_level" is the level of the latest collection.
317 void update_gc_stats(int current_level, bool full) {
318 _young_gen->update_gc_stats(current_level, full);
319 _old_gen->update_gc_stats(current_level, full);
320 }
321
322 bool no_gc_in_progress() { return !is_gc_active(); }
323
324 // Override.
325 void prepare_for_verify();
326
327 // Override.
328 void verify(bool silent, VerifyOption option);
329
330 // Override.
331 virtual void print_on(outputStream* st) const;
332 virtual void print_gc_threads_on(outputStream* st) const;
333 virtual void gc_threads_do(ThreadClosure* tc) const;
334 virtual void print_tracing_info() const;
335 virtual void print_on_error(outputStream* st) const;
336
337 // PrintGC, PrintGCDetails support
338 void print_heap_change(size_t prev_used) const;
339
350 // If "old_to_young" determines the order.
351 void generation_iterate(GenClosure* cl, bool old_to_young);
352
353 // Return "true" if all generations have reached the
354 // maximal committed limit that they can reach, without a garbage
355 // collection.
356 virtual bool is_maximal_no_gc() const;
357
358 // This function returns the "GenRemSet" object that allows us to scan
359 // generations in a fully generational heap.
360 GenRemSet* rem_set() { return _rem_set; }
361
362 // Convenience function to be used in situations where the heap type can be
363 // asserted to be this type.
364 static GenCollectedHeap* heap();
365
366 void set_par_threads(uint t);
367 void set_n_termination(uint t);
368
369 // Invoke the "do_oop" method of one of the closures "not_older_gens"
370 // or "older_gens" on root locations for the generation at
371 // "level". (The "older_gens" closure is used for scanning references
372 // from older generations; "not_older_gens" is used everywhere else.)
373 // If "younger_gens_as_roots" is false, younger generations are
374 // not scanned as roots; in this case, the caller must be arranging to
375 // scan the younger generations itself. (For example, a generation might
376 // explicitly mark reachable objects in younger generations, to avoid
377 // excess storage retention.)
378 // The "so" argument determines which of the roots
379 // the closure is applied to:
380 // "SO_None" does none;
381 enum ScanningOption {
382 SO_None = 0x0,
383 SO_AllCodeCache = 0x8,
384 SO_ScavengeCodeCache = 0x10
385 };
386
387 private:
388 void process_roots(bool activate_scope,
389 ScanningOption so,
390 OopClosure* strong_roots,
391 OopClosure* weak_roots,
392 CLDClosure* strong_cld_closure,
393 CLDClosure* weak_cld_closure,
394 CodeBlobClosure* code_roots);
395
396 void gen_process_roots(int level,
397 bool younger_gens_as_roots,
398 bool activate_scope,
399 ScanningOption so,
400 OopsInGenClosure* not_older_gens,
401 OopsInGenClosure* weak_roots,
402 OopsInGenClosure* older_gens,
403 CLDClosure* cld_closure,
404 CLDClosure* weak_cld_closure,
405 CodeBlobClosure* code_closure);
406
407 public:
408 static const bool StrongAndWeakRoots = false;
409 static const bool StrongRootsOnly = true;
410
411 void gen_process_roots(int level,
412 bool younger_gens_as_roots,
413 bool activate_scope,
414 ScanningOption so,
415 bool only_strong_roots,
416 OopsInGenClosure* not_older_gens,
417 OopsInGenClosure* older_gens,
418 CLDClosure* cld_closure);
419
420 // Apply "root_closure" to all the weak roots of the system.
421 // These include JNI weak roots, string table,
422 // and referents of reachable weak refs.
423 void gen_process_weak_roots(OopClosure* root_closure);
424
425 // Set the saved marks of generations, if that makes sense.
426 // In particular, if any generation might iterate over the oops
427 // in other generations, it should call this method.
428 void save_marks();
429
430 // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
431 // allocated since the last call to save_marks in generations at or above
432 // "level". The "cur" closure is
433 // applied to references in the generation at "level", and the "older"
434 // closure to older generations.
435 #define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \
436 void oop_since_save_marks_iterate(int level, \
437 OopClosureType* cur, \
438 OopClosureType* older);
439
440 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)
441
442 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
443
444 // Returns "true" iff no allocations have occurred in any generation at
445 // "level" or above since the last
446 // call to "save_marks".
447 bool no_allocs_since_save_marks(int level);
448
449 // Returns true if an incremental collection is likely to fail.
450 // We optionally consult the young gen, if asked to do so;
451 // otherwise we base our answer on whether the previous incremental
452 // collection attempt failed with no corrective action as of yet.
453 bool incremental_collection_will_fail(bool consult_young) {
454 // Assumes a 2-generation system; the first disjunct remembers if an
455 // incremental collection failed, even when we thought (second disjunct)
456 // that it would not.
457 assert(heap()->collector_policy()->is_generation_policy(),
458 "the following definition may not be suitable for an n(>2)-generation system");
459 return incremental_collection_failed() ||
460 (consult_young && !_young_gen->collection_attempt_is_safe());
461 }
462
463 // If a generation bails out of an incremental collection,
464 // it sets this flag.
465 bool incremental_collection_failed() const {
466 return _incremental_collection_failed;
467 }
// Record that the most recent incremental collection bailed out.
468 void set_incremental_collection_failed() {
469 _incremental_collection_failed = true;
470 }
// Reset the failure flag (once corrective action has been taken — see
// incremental_collection_will_fail() above).
471 void clear_incremental_collection_failed() {
472 _incremental_collection_failed = false;
473 }
474
475 // Promotion of obj into gen failed. Try to promote obj to higher
476 // gens in ascending order; return the new location of obj if successful.
477 // Otherwise, try expand-and-allocate for obj in both the young and old
478 // generation; return the new location of obj if successful. Otherwise, return NULL.
479 oop handle_failed_promotion(Generation* old_gen,
480 oop obj,
481 size_t obj_size);
482
483 private:
484 // Accessor for memory state verification support
485 NOT_PRODUCT(
486 static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
487 )
488
489 // Override
490 void check_for_non_bad_heap_word_value(HeapWord* addr,
491 size_t size) PRODUCT_RETURN;
492
493 // For use by mark-sweep. As implemented, mark-sweep-compact is global
494 // in an essential way: compaction is performed across generations, by
495 // iterating over spaces.
496 void prepare_for_compaction();
497
498 // Perform a full collection of the first max_level+1 generations.
499 // This is the low level interface used by the public versions of
500 // collect() and collect_locked(). Caller holds the Heap_lock on entry.
501 void collect_locked(GCCause::Cause cause, int max_level);
502
503 // Returns success or failure.
504 bool create_cms_collector();
505
506 // In support of ExplicitGCInvokesConcurrent functionality
507 bool should_do_concurrent_full_gc(GCCause::Cause cause);
508 void collect_mostly_concurrent(GCCause::Cause cause);
509
510 // Save the tops of the spaces in all generations
511 void record_gen_tops_before_GC() PRODUCT_RETURN;
512
513 protected:
514 void gc_prologue(bool full);
515 void gc_epilogue(bool full);
516 };
517
518 #endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
|
77 SubTasksDone* _process_strong_tasks;
78
79 // Collects the given generation.
80 void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
81 bool run_verification, bool clear_soft_refs,
82 bool restore_marks_for_biased_locking);
83
84 // In block contents verification, the number of header words to skip
85 NOT_PRODUCT(static size_t _skip_header_HeapWords;)
86
87 FlexibleWorkGang* _workers;
88
89 protected:
90 // Helper functions for allocation
91 HeapWord* attempt_allocation(size_t size,
92 bool is_tlab,
93 bool first_only);
94
95 // Helper function for two callbacks below.
96 // Considers collection of the first max_level+1 generations.
97 void do_collection(bool full,
98 bool clear_all_soft_refs,
99 size_t size,
100 bool is_tlab,
101 Generation::Type max_generation);
102
103 // Callback from VM_GenCollectForAllocation operation.
104 // This function does everything necessary/possible to satisfy an
105 // allocation request that failed in the youngest generation that should
106 // have handled it (including collection, expansion, etc.)
107 HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
108
109 // Callback from VM_GenCollectFull operation.
110 // Perform a full collection of the first max_level+1 generations.
111 virtual void do_full_collection(bool clear_all_soft_refs);
112 void do_full_collection(bool clear_all_soft_refs, Generation::Type max_generation);
113
114 // Does the "cause" of GC indicate that
115 // we absolutely __must__ clear soft refs?
116 bool must_clear_all_soft_refs();
117
118 public:
119 GenCollectedHeap(GenCollectorPolicy *policy);
120
121 FlexibleWorkGang* workers() const { return _workers; }
122
123 GCStats* gc_stats(Generation* generation) const;
124
125 // Returns JNI_OK on success
126 virtual jint initialize();
127
128 // Reserve aligned space for the heap as needed by the contained generations.
129 char* allocate(size_t alignment, ReservedSpace* heap_rs);
130
131 // Does operations required after initialization has been done.
132 void post_initialize();
133
134 // Initialize ("weak") refs processing support
135 virtual void ref_processing_init();
136
// Tag used by CollectedHeap clients to identify the concrete heap kind.
137 virtual Name kind() const {
138 return CollectedHeap::GenCollectedHeap;
139 }
140
141 Generation* young_gen() const { return _young_gen; }
142 Generation* old_gen() const { return _old_gen; }
143
144 // The generational collector policy.
145 GenCollectorPolicy* gen_policy() const { return _gen_policy; }
146
147 virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); }
148
149 // Adaptive size policy
150 virtual AdaptiveSizePolicy* size_policy() {
151 return gen_policy()->size_policy();
152 }
153
154 // Return the (conservative) maximum heap alignment
155 static size_t conservative_max_heap_alignment() {
156 return Generation::GenGrain;
157 }
158
159 size_t capacity() const;
160 size_t used() const;
161
162 // Save the "used_region" for both generations.
163 void save_used_regions();
164
165 size_t max_capacity() const;
166
167 HeapWord* mem_allocate(size_t size,
168 bool* gc_overhead_limit_was_exceeded);
169
170 // We may support a shared contiguous allocation area, if the youngest
171 // generation does.
172 bool supports_inline_contig_alloc() const;
173 HeapWord** top_addr() const;
174 HeapWord** end_addr() const;
175
176 // Perform a full collection of the heap; intended for use in implementing
177 // "System.gc". This implies as full a collection as the CollectedHeap
178 // supports. Caller does not hold the Heap_lock on entry.
179 void collect(GCCause::Cause cause);
180
181 // The same as above but assume that the caller holds the Heap_lock.
182 void collect_locked(GCCause::Cause cause);
183
184 // Perform a full collection of generations up to and including max_generation.
185 // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
186 void collect(GCCause::Cause cause, Generation::Type max_generation);
187
188 // Returns "TRUE" iff "p" points into the committed areas of the heap.
189 // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
190 // be expensive to compute in general, so, to prevent
191 // their inadvertent use in product jvm's, we restrict their use to
192 // assertion checking or verification only.
193 bool is_in(const void* p) const;
194
195 // override
// With CMS the check widens to the reserved space; elsewhere it defers to
// the precise is_in() test, which (per the note above) may be expensive and
// is restricted to assertion/verification use.
196 bool is_in_closed_subset(const void* p) const {
197 if (UseConcMarkSweepGC) {
198 return is_in_reserved(p);
199 } else {
200 return is_in(p);
201 }
202 }
203
204 // Returns true if the reference is to an object in the reserved space
205 // for the young generation.
206 // Assumes that the young gen address range is less than that of the old gen.
296
297 // Total number of full collections completed.
// May lag _total_full_collections while a stop-world or concurrent cycle
// is still in progress (see update_full_collections_completed below).
298 unsigned int total_full_collections_completed() {
299 assert(_full_collections_completed <= _total_full_collections,
300 "Can't complete more collections than were started");
301 return _full_collections_completed;
302 }
303
304 // Update above counter, as appropriate, at the end of a stop-world GC cycle
305 unsigned int update_full_collections_completed();
306 // Update above counter, as appropriate, at the end of a concurrent GC cycle
307 unsigned int update_full_collections_completed(unsigned int count);
308
309 // Update "time of last gc" for all generations to "now".
310 void update_time_of_last_gc(jlong now) {
311 _young_gen->update_time_of_last_gc(now);
312 _old_gen->update_time_of_last_gc(now);
313 }
314
315 // Update the gc statistics for each generation.
// NOTE(review): only _old_gen actually receives the update here;
// "current_generation" identifies the generation that was just collected.
// Presumably the young generation keeps no GC stats — confirm against the
// Generation::update_gc_stats implementations.
316 void update_gc_stats(Generation* current_generation, bool full) {
317 _old_gen->update_gc_stats(current_generation, full);
318 }
319
320 bool no_gc_in_progress() { return !is_gc_active(); }
321
322 // Override.
323 void prepare_for_verify();
324
325 // Override.
326 void verify(bool silent, VerifyOption option);
327
328 // Override.
329 virtual void print_on(outputStream* st) const;
330 virtual void print_gc_threads_on(outputStream* st) const;
331 virtual void gc_threads_do(ThreadClosure* tc) const;
332 virtual void print_tracing_info() const;
333 virtual void print_on_error(outputStream* st) const;
334
335 // PrintGC, PrintGCDetails support
336 void print_heap_change(size_t prev_used) const;
337
348 // If "old_to_young" determines the order.
349 void generation_iterate(GenClosure* cl, bool old_to_young);
350
351 // Return "true" if all generations have reached the
352 // maximal committed limit that they can reach, without a garbage
353 // collection.
354 virtual bool is_maximal_no_gc() const;
355
356 // This function returns the "GenRemSet" object that allows us to scan
357 // generations in a fully generational heap.
358 GenRemSet* rem_set() { return _rem_set; }
359
360 // Convenience function to be used in situations where the heap type can be
361 // asserted to be this type.
362 static GenCollectedHeap* heap();
363
364 void set_par_threads(uint t);
365 void set_n_termination(uint t);
366
367 // Invoke the "do_oop" method of one of the closures "not_older_gens"
368 // or "older_gens" on root locations for the generation selected by
369 // "type". (The "older_gens" closure is used for scanning references
370 // from older generations; "not_older_gens" is used everywhere else.)
371 // If "younger_gens_as_roots" is false, younger generations are
372 // not scanned as roots; in this case, the caller must be arranging to
373 // scan the younger generations itself. (For example, a generation might
374 // explicitly mark reachable objects in younger generations, to avoid
375 // excess storage retention.)
376 // The "so" argument determines which of the roots
377 // the closure is applied to:
378 // "SO_None" does none;
379 enum ScanningOption {
380 SO_None = 0x0,
381 SO_AllCodeCache = 0x8,
382 SO_ScavengeCodeCache = 0x10
383 };
384
385 private:
386 void process_roots(bool activate_scope,
387 ScanningOption so,
388 OopClosure* strong_roots,
389 OopClosure* weak_roots,
390 CLDClosure* strong_cld_closure,
391 CLDClosure* weak_cld_closure,
392 CodeBlobClosure* code_roots);
393
394 void gen_process_roots(Generation::Type type,
395 bool younger_gens_as_roots,
396 bool activate_scope,
397 ScanningOption so,
398 OopsInGenClosure* not_older_gens,
399 OopsInGenClosure* weak_roots,
400 OopsInGenClosure* older_gens,
401 CLDClosure* cld_closure,
402 CLDClosure* weak_cld_closure,
403 CodeBlobClosure* code_closure);
404
405 public:
406 static const bool StrongAndWeakRoots = false;
407 static const bool StrongRootsOnly = true;
408
409 void gen_process_roots(Generation::Type type,
410 bool younger_gens_as_roots,
411 bool activate_scope,
412 ScanningOption so,
413 bool only_strong_roots,
414 OopsInGenClosure* not_older_gens,
415 OopsInGenClosure* older_gens,
416 CLDClosure* cld_closure);
417
418 // Apply "root_closure" to all the weak roots of the system.
419 // These include JNI weak roots, string table,
420 // and referents of reachable weak refs.
421 void gen_process_weak_roots(OopClosure* root_closure);
422
423 // Set the saved marks of generations, if that makes sense.
424 // In particular, if any generation might iterate over the oops
425 // in other generations, it should call this method.
426 void save_marks();
427
428 // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
429 // allocated since the last call to save_marks in generations at or above
430 // "level". The "cur" closure is
431 // applied to references in the generation at "level", and the "older"
432 // closure to older generations.
433 #define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \
434 void oop_since_save_marks_iterate(Generation::Type start_gen, \
435 OopClosureType* cur, \
436 OopClosureType* older);
437
438 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)
439
440 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
441
442 // Returns "true" iff no allocations have occurred since the last
443 // call to "save_marks".
444 bool no_allocs_since_save_marks(bool include_young);
445
446 // Returns true if an incremental collection is likely to fail.
447 // We optionally consult the young gen, if asked to do so;
448 // otherwise we base our answer on whether the previous incremental
449 // collection attempt failed with no corrective action as of yet.
450 bool incremental_collection_will_fail(bool consult_young) {
451 // The first disjunct remembers if an incremental collection failed, even
452 // when we thought (second disjunct) that it would not.
453 return incremental_collection_failed() ||
454 (consult_young && !_young_gen->collection_attempt_is_safe());
455 }
456
457 // If a generation bails out of an incremental collection,
458 // it sets this flag.
459 bool incremental_collection_failed() const {
460 return _incremental_collection_failed;
461 }
// Record that the most recent incremental collection bailed out.
462 void set_incremental_collection_failed() {
463 _incremental_collection_failed = true;
464 }
// Reset the failure flag (once corrective action has been taken — see
// incremental_collection_will_fail() above).
465 void clear_incremental_collection_failed() {
466 _incremental_collection_failed = false;
467 }
468
469 // Promotion of obj into gen failed. Try to promote obj to higher
470 // gens in ascending order; return the new location of obj if successful.
471 // Otherwise, try expand-and-allocate for obj in both the young and old
472 // generation; return the new location of obj if successful. Otherwise, return NULL.
473 oop handle_failed_promotion(Generation* old_gen,
474 oop obj,
475 size_t obj_size);
476
477 private:
478 // Accessor for memory state verification support
479 NOT_PRODUCT(
480 static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
481 )
482
483 // Override
484 void check_for_non_bad_heap_word_value(HeapWord* addr,
485 size_t size) PRODUCT_RETURN;
486
487 // For use by mark-sweep. As implemented, mark-sweep-compact is global
488 // in an essential way: compaction is performed across generations, by
489 // iterating over spaces.
490 void prepare_for_compaction();
491
492 // Perform a full collection of the generations up to and including max_generation.
493 // This is the low level interface used by the public versions of
494 // collect() and collect_locked(). Caller holds the Heap_lock on entry.
495 void collect_locked(GCCause::Cause cause, Generation::Type max_generation);
496
497 // Returns success or failure.
498 bool create_cms_collector();
499
500 // In support of ExplicitGCInvokesConcurrent functionality
501 bool should_do_concurrent_full_gc(GCCause::Cause cause);
502 void collect_mostly_concurrent(GCCause::Cause cause);
503
504 // Save the tops of the spaces in all generations
505 void record_gen_tops_before_GC() PRODUCT_RETURN;
506
507 protected:
508 void gc_prologue(bool full);
509 void gc_epilogue(bool full);
510 };
511
512 #endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
|