// Expose the generational policy through the generic CollectorPolicy
// interface. NOTE(review): C-style cast — presumably gen_policy() returns
// a CollectorPolicy subclass (or a const-qualified pointer being stripped);
// confirm against the gen_policy() declaration before changing it.
143 virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); }
144
145 // Adaptive size policy
// Delegates to the generational policy's AdaptiveSizePolicy.
// NOTE(review): may return NULL if the policy does not use adaptive
// sizing — confirm against gen_policy()'s implementation.
146 virtual AdaptiveSizePolicy* size_policy() {
147 return gen_policy()->size_policy();
148 }
149
150 // Return the (conservative) maximum heap alignment
// Static so it can be queried before any heap instance exists;
// Generation::GenGrain is the generation granularity, the coarsest
// alignment a generation's reserved space will require.
151 static size_t conservative_max_heap_alignment() {
152 return Generation::GenGrain;
153 }
154
// Heap-wide capacity and usage, aggregated across the generations.
155 size_t capacity() const;
156 size_t used() const;
157
158 // Save the "used_region" for both generations.
159 void save_used_regions();
160
// Upper bound on the capacity this heap could ever grow to.
161 size_t max_capacity() const;
162
// Allocate "size" heap words. On failure, the out-parameter reports
// whether the GC-overhead limit (rather than simple exhaustion)
// blocked the allocation.
163 HeapWord* mem_allocate(size_t size,
164 bool* gc_overhead_limit_was_exceeded);
165
166 // We may support a shared contiguous allocation area, if the youngest
167 // generation does.
168 bool supports_inline_contig_alloc() const;
// Addresses of the inline-allocation top/end pointers, so compiled code
// can bump-allocate without a runtime call. NOTE(review): presumably only
// meaningful when supports_inline_contig_alloc() is true — verify in the
// .cpp before relying on that.
169 HeapWord** top_addr() const;
170 HeapWord** end_addr() const;
172 // Does this heap support heap inspection? (+PrintClassHistogram)
// Unconditionally true for this heap implementation.
173 virtual bool supports_heap_inspection() const { return true; }
174
175 // Perform a full collection of the heap; intended for use in implementing
176 // "System.gc". This implies as full a collection as the CollectedHeap
177 // supports. Caller does not hold the Heap_lock on entry.
178 void collect(GCCause::Cause cause);
179
180 // The same as above but assume that the caller holds the Heap_lock.
181 void collect_locked(GCCause::Cause cause);
182
183 // Perform a full collection of generations up to and including max_gen.
184 // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
// (The overload documented by the two lines above is declared past the
// end of this view.)
// NOTE(review): stray "|" separator — the lines below repeat original lines
// 143-184 from above; this chunk looks like a side-by-side diff/merge-paste
// artifact, and one of the two copies should be removed.
// Expose the generational policy through the generic CollectorPolicy
// interface. NOTE(review): C-style cast — presumably gen_policy() returns
// a CollectorPolicy subclass (or a const-qualified pointer being stripped);
// confirm against the gen_policy() declaration before changing it.
143 virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); }
144
145 // Adaptive size policy
// Delegates to the generational policy's AdaptiveSizePolicy.
// NOTE(review): may return NULL if the policy does not use adaptive
// sizing — confirm against gen_policy()'s implementation.
146 virtual AdaptiveSizePolicy* size_policy() {
147 return gen_policy()->size_policy();
148 }
149
150 // Return the (conservative) maximum heap alignment
// Static so it can be queried before any heap instance exists;
// Generation::GenGrain is the generation granularity, the coarsest
// alignment a generation's reserved space will require.
151 static size_t conservative_max_heap_alignment() {
152 return Generation::GenGrain;
153 }
154
// Heap-wide capacity and usage, aggregated across the generations.
155 size_t capacity() const;
156 size_t used() const;
157
158 // Save the "used_region" for both generations.
159 void save_used_regions();
160
// Upper bound on the capacity this heap could ever grow to.
161 size_t max_capacity() const;
162
// Allocate "size" heap words. On failure, the out-parameter reports
// whether the GC-overhead limit (rather than simple exhaustion)
// blocked the allocation.
163 HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
164
165 // We may support a shared contiguous allocation area, if the youngest
166 // generation does.
167 bool supports_inline_contig_alloc() const;
// Addresses of the inline-allocation top/end pointers, so compiled code
// can bump-allocate without a runtime call. NOTE(review): presumably only
// meaningful when supports_inline_contig_alloc() is true — verify in the
// .cpp before relying on that.
168 HeapWord** top_addr() const;
169 HeapWord** end_addr() const;
171 // Does this heap support heap inspection? (+PrintClassHistogram)
// Unconditionally true for this heap implementation.
172 virtual bool supports_heap_inspection() const { return true; }
173
174 // Perform a full collection of the heap; intended for use in implementing
175 // "System.gc". This implies as full a collection as the CollectedHeap
176 // supports. Caller does not hold the Heap_lock on entry.
177 void collect(GCCause::Cause cause);
178
179 // The same as above but assume that the caller holds the Heap_lock.
180 void collect_locked(GCCause::Cause cause);
181
182 // Perform a full collection of generations up to and including max_gen.
183 // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
// (The overload documented by the two lines above is declared past the
// end of this view.)
// NOTE(review): stray "|" separator left over from a side-by-side
// diff/merge paste; the preceding region duplicates original lines 143-184.