256 virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
257 virtual HeapWord* allocate_new_tlab(size_t size);
258
259 // Can a compiler initialize a new object without store barriers?
260 // This permission only extends from the creation of a new object
261 // via a TLAB up to the first subsequent safepoint.
262 virtual bool can_elide_tlab_store_barriers() const {
263 return true;
264 }
265
266 virtual bool card_mark_must_follow_store() const {
267 return UseConcMarkSweepGC;
268 }
269
270 // We don't need barriers for stores to objects in the
271 // young gen and, a fortiori, for initializing stores to
272 // objects therein. This applies to DefNew+Tenured and ParNew+CMS
273 // only and may need to be re-examined in case other
274 // kinds of collectors are implemented in the future.
275 virtual bool can_elide_initializing_store_barrier(oop new_obj) {
276 // We wanted to assert that:-
277 // assert(UseSerialGC || UseConcMarkSweepGC,
278 // "Check can_elide_initializing_store_barrier() for this collector");
279 // but unfortunately the flag UseSerialGC need not necessarily always
280 // be set when DefNew+Tenured are being used.
281 return is_in_young(new_obj);
282 }
283
284 // The "requestor" generation is performing some garbage collection
285 // action for which it would be useful to have scratch space. The
286 // requestor promises to allocate no more than "max_alloc_words" in any
287 // older generation (via promotion say.) Any blocks of space that can
288 // be provided are returned as a list of ScratchBlocks, sorted by
289 // decreasing size.
290 ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
291 // Allow each generation to reset any scratch space that it has
292 // contributed as it needs.
293 void release_scratch();
294
295 // Ensure parsability: override
296 virtual void ensure_parsability(bool retire_tlabs);
297
298 // Time in ms since the longest time a collector ran
299 // in any generation.
300 virtual jlong millis_since_last_gc();
|
256 virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
257 virtual HeapWord* allocate_new_tlab(size_t size);
258
259 // Can a compiler initialize a new object without store barriers?
260 // This permission only extends from the creation of a new object
261 // via a TLAB up to the first subsequent safepoint.
262 virtual bool can_elide_tlab_store_barriers() const {
263 return true;
264 }
265
266 virtual bool card_mark_must_follow_store() const {
267 return UseConcMarkSweepGC;
268 }
269
270 // We don't need barriers for stores to objects in the
271 // young gen and, a fortiori, for initializing stores to
272 // objects therein. This applies to DefNew+Tenured and ParNew+CMS
273 // only and may need to be re-examined in case other
274 // kinds of collectors are implemented in the future.
275 virtual bool can_elide_initializing_store_barrier(oop new_obj) {
276 return is_in_young(new_obj);
277 }
278
279 // The "requestor" generation is performing some garbage collection
280 // action for which it would be useful to have scratch space. The
281 // requestor promises to allocate no more than "max_alloc_words" in any
282 // older generation (via promotion say.) Any blocks of space that can
283 // be provided are returned as a list of ScratchBlocks, sorted by
284 // decreasing size.
285 ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
286 // Allow each generation to reset any scratch space that it has
287 // contributed as it needs.
288 void release_scratch();
289
290 // Ensure parsability: override
291 virtual void ensure_parsability(bool retire_tlabs);
292
293 // Time in ms since the longest time a collector ran
294 // in any generation.
295 virtual jlong millis_since_last_gc();
|