< prev index next >

src/share/vm/gc/shared/collectedHeap.hpp

Print this page
rev 13280 : imported patch CollectedHeap_register_nmethod
rev 13281 : [mq]: CollectedHeap_register_nmethod_v2


 376   // The argument "retire_tlabs" controls whether existing TLABs
 377   // are merely filled or also retired, thus preventing further
 378   // allocation from them and necessitating allocation of new TLABs.
 379   virtual void ensure_parsability(bool retire_tlabs);
 380 
 381   // Section on thread-local allocation buffers (TLABs)
 382   // If the heap supports thread-local allocation buffers, it should override
 383   // the following methods:
 384   // Returns "true" iff the heap supports thread-local allocation buffers.
 385   // The default is "no".
       // NOTE(review): the comment above claims a default of "no", but the
       // method is pure virtual -- there is no default implementation; every
       // heap must override it. The comment appears stale.
 386   virtual bool supports_tlab_allocation() const = 0;
 387 
 388   // The amount of space available for thread-local allocation buffers.
 389   virtual size_t tlab_capacity(Thread *thr) const = 0;
 390 
 391   // The amount of used space for thread-local allocation buffers for the given thread.
 392   virtual size_t tlab_used(Thread *thr) const = 0;
 393 
       // Upper bound on the size of a single TLAB.
       // NOTE(review): units (heap words vs. bytes) are not stated here --
       // confirm against the concrete heap implementations before relying on it.
 394   virtual size_t max_tlab_size() const;
 395 
 396   virtual void verify_nmethod_roots(nmethod* nmethod);
 397 
 398   // An estimate of the maximum allocation that could be performed
 399   // for thread-local allocation buffers without triggering any
 400   // collection or expansion activity.
       // The default implementation fails hard (guarantee) -- heaps that return
       // true from supports_tlab_allocation() must override this.
 401   virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
 402     guarantee(false, "thread-local allocation buffers not supported");
 403     return 0;
 404   }
 405 
 405 
 406   // Can a compiler initialize a new object without store barriers?
 407   // This permission only extends from the creation of a new object
 408   // via a TLAB up to the first subsequent safepoint. If such permission
 409   // is granted for this heap type, the compiler promises to call
 410   // defer_store_barrier() below on any slow path allocation of
 411   // a new object for which such initializing store barriers will
 412   // have been elided.
       // NOTE(review): defer_store_barrier() is referenced above but its
       // declaration is not visible in this chunk -- confirm it still exists.
 413   virtual bool can_elide_tlab_store_barriers() const = 0;
 414 
 415   // If a compiler is eliding store barriers for TLAB-allocated objects,
 416   // there is probably a corresponding slow path which can produce
 417   // an object allocated anywhere.  The compiler's runtime support


 556   // used by this heap.
       // Print the GC threads used by this heap onto the given stream.
 557   virtual void print_gc_threads_on(outputStream* st) const = 0;
 558   // The default behavior is to call print_gc_threads_on() on tty.
       // Convenience wrapper: prints the GC threads to the global tty stream.
 559   void print_gc_threads() {
 560     print_gc_threads_on(tty);
 561   }
 562   // Iterator for all GC threads (other than VM thread)
 563   virtual void gc_threads_do(ThreadClosure* tc) const = 0;
 564 
 565   // Print any relevant tracing info that flags imply.
 566   // Default implementation does nothing.
       // NOTE(review): the comment above claims a default implementation, but
       // the method is pure virtual -- the comment appears stale.
 567   virtual void print_tracing_info() const = 0;
 568 
       // Print the heap state before/after a collection.
 569   void print_heap_before_gc();
 570   void print_heap_after_gc();
 571 
 572   // Registering and unregistering an nmethod (compiled code) with the heap.
 573   // Override with specific mechanism for each specialized heap type.
 574   virtual void register_nmethod(nmethod* nm);
 575   virtual void unregister_nmethod(nmethod* nm);

 576 
       // Report heap state to the given GC tracer before/after a collection.
 577   void trace_heap_before_gc(const GCTracer* gc_tracer);
 578   void trace_heap_after_gc(const GCTracer* gc_tracer);
 579 
 580   // Heap verification
 581   virtual void verify(VerifyOption option) = 0;
 582 
 583   // Return true if concurrent phase control (via
 584   // request_concurrent_phase_control) is supported by this collector.
 585   // The default implementation returns false.
 586   virtual bool supports_concurrent_phase_control() const;
 587 
 588   // Return a NULL terminated array of concurrent phase names provided
 589   // by this collector.  Supports WhiteBox testing.  These are the
 590   // names recognized by request_concurrent_phase(). The default
 591   // implementation returns an array of one NULL element.
 592   virtual const char* const* concurrent_phases() const;
 593 
 594   // Request the collector enter the indicated concurrent phase, and
 595   // wait until it does so.  Supports WhiteBox testing.  Only one




 376   // The argument "retire_tlabs" controls whether existing TLABs
 377   // are merely filled or also retired, thus preventing further
 378   // allocation from them and necessitating allocation of new TLABs.
 379   virtual void ensure_parsability(bool retire_tlabs);
 380 
 381   // Section on thread-local allocation buffers (TLABs)
 382   // If the heap supports thread-local allocation buffers, it should override
 383   // the following methods:
 384   // Returns "true" iff the heap supports thread-local allocation buffers.
 385   // The default is "no".
       // NOTE(review): the comment above claims a default of "no", but the
       // method is pure virtual -- there is no default implementation; every
       // heap must override it. The comment appears stale.
 386   virtual bool supports_tlab_allocation() const = 0;
 387 
 388   // The amount of space available for thread-local allocation buffers.
 389   virtual size_t tlab_capacity(Thread *thr) const = 0;
 390 
 391   // The amount of used space for thread-local allocation buffers for the given thread.
 392   virtual size_t tlab_used(Thread *thr) const = 0;
 393 
       // Upper bound on the size of a single TLAB.
       // NOTE(review): units (heap words vs. bytes) are not stated here --
       // confirm against the concrete heap implementations before relying on it.
 394   virtual size_t max_tlab_size() const;
 395 


 396   // An estimate of the maximum allocation that could be performed
 397   // for thread-local allocation buffers without triggering any
 398   // collection or expansion activity.
       // The default implementation fails hard (guarantee) -- heaps that return
       // true from supports_tlab_allocation() must override this.
 399   virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
 400     guarantee(false, "thread-local allocation buffers not supported");
 401     return 0;
 402   }
 403 
 403 
 404   // Can a compiler initialize a new object without store barriers?
 405   // This permission only extends from the creation of a new object
 406   // via a TLAB up to the first subsequent safepoint. If such permission
 407   // is granted for this heap type, the compiler promises to call
 408   // defer_store_barrier() below on any slow path allocation of
 409   // a new object for which such initializing store barriers will
 410   // have been elided.
       // NOTE(review): defer_store_barrier() is referenced above but its
       // declaration is not visible in this chunk -- confirm it still exists.
 411   virtual bool can_elide_tlab_store_barriers() const = 0;
 412 
 413   // If a compiler is eliding store barriers for TLAB-allocated objects,
 414   // there is probably a corresponding slow path which can produce
 415   // an object allocated anywhere.  The compiler's runtime support


 554   // used by this heap.
       // Print the GC threads used by this heap onto the given stream.
 555   virtual void print_gc_threads_on(outputStream* st) const = 0;
 556   // The default behavior is to call print_gc_threads_on() on tty.
       // Convenience wrapper: prints the GC threads to the global tty stream.
 557   void print_gc_threads() {
 558     print_gc_threads_on(tty);
 559   }
 560   // Iterator for all GC threads (other than VM thread)
 561   virtual void gc_threads_do(ThreadClosure* tc) const = 0;
 562 
 563   // Print any relevant tracing info that flags imply.
 564   // Default implementation does nothing.
       // NOTE(review): the comment above claims a default implementation, but
       // the method is pure virtual -- the comment appears stale.
 565   virtual void print_tracing_info() const = 0;
 566 
       // Print the heap state before/after a collection.
 567   void print_heap_before_gc();
 568   void print_heap_after_gc();
 569 
 570   // Registering and unregistering an nmethod (compiled code) with the heap.
 571   // Override with specific mechanism for each specialized heap type.
 572   virtual void register_nmethod(nmethod* nm);
 573   virtual void unregister_nmethod(nmethod* nm);
       // Verify the roots held by the given nmethod.
       // NOTE(review): parameter renamed from "nmethod" to "nm" -- naming a
       // parameter identically to its type shadows the type name inside the
       // declarator scope and is inconsistent with the two declarations above.
 574   virtual void verify_nmethod_roots(nmethod* nm);
 575 
       // Report heap state to the given GC tracer before/after a collection.
 576   void trace_heap_before_gc(const GCTracer* gc_tracer);
 577   void trace_heap_after_gc(const GCTracer* gc_tracer);
 578 
 579   // Heap verification
 580   virtual void verify(VerifyOption option) = 0;
 581 
 582   // Return true if concurrent phase control (via
 583   // request_concurrent_phase_control) is supported by this collector.
 584   // The default implementation returns false.
 585   virtual bool supports_concurrent_phase_control() const;
 586 
 587   // Return a NULL terminated array of concurrent phase names provided
 588   // by this collector.  Supports WhiteBox testing.  These are the
 589   // names recognized by request_concurrent_phase(). The default
 590   // implementation returns an array of one NULL element.
 591   virtual const char* const* concurrent_phases() const;
 592 
 593   // Request the collector enter the indicated concurrent phase, and
 594   // wait until it does so.  Supports WhiteBox testing.  Only one


< prev index next >