src/hotspot/share/gc/epsilon/epsilonHeap.hpp

--- old/src/hotspot/share/gc/epsilon/epsilonHeap.hpp

  86   }
  87 
  88   // Allocation
  89   HeapWord* allocate_work(size_t size);
  90   virtual HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
  91   virtual HeapWord* allocate_new_tlab(size_t min_size,
  92                                       size_t requested_size,
  93                                       size_t* actual_size);
  94 
  95   // TLAB allocation
  96   virtual bool supports_tlab_allocation()           const { return true;           }
  97   virtual size_t tlab_capacity(Thread* thr)         const { return capacity();     }
  98   virtual size_t tlab_used(Thread* thr)             const { return used();         }
  99   virtual size_t max_tlab_size()                    const { return _max_tlab_size; }
 100   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
 101 
 102   virtual void collect(GCCause::Cause cause);
 103   virtual void do_full_collection(bool clear_all_soft_refs);
 104 
 105   // Heap walking support
 106   virtual void safe_object_iterate(ObjectClosure* cl);
 107   virtual void object_iterate(ObjectClosure* cl) {
 108     safe_object_iterate(cl);
 109   }
 110 
 111   // Object pinning support: every object is implicitly pinned
 112   virtual bool supports_object_pinning() const           { return true; }
 113   virtual oop pin_object(JavaThread* thread, oop obj)    { return obj; }
 114   virtual void unpin_object(JavaThread* thread, oop obj) { }
 115 
 116   // No support for block parsing.
 117   HeapWord* block_start(const void* addr) const { return NULL;  }
 118   bool block_is_obj(const HeapWord* addr) const { return false; }
 119 
 120   // No GC threads
 121   virtual void print_gc_threads_on(outputStream* st) const {}
 122   virtual void gc_threads_do(ThreadClosure* tc) const {}
 123 
 124   // No nmethod handling
 125   virtual void register_nmethod(nmethod* nm) {}
 126   virtual void unregister_nmethod(nmethod* nm) {}
 127   virtual void flush_nmethod(nmethod* nm) {}
 128   virtual void verify_nmethod(nmethod* nm) {}
  129 

+++ new/src/hotspot/share/gc/epsilon/epsilonHeap.hpp

  86   }
  87 
  88   // Allocation
  89   HeapWord* allocate_work(size_t size);
  90   virtual HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
  91   virtual HeapWord* allocate_new_tlab(size_t min_size,
  92                                       size_t requested_size,
  93                                       size_t* actual_size);
  94 
  95   // TLAB allocation
  96   virtual bool supports_tlab_allocation()           const { return true;           }
  97   virtual size_t tlab_capacity(Thread* thr)         const { return capacity();     }
  98   virtual size_t tlab_used(Thread* thr)             const { return used();         }
  99   virtual size_t max_tlab_size()                    const { return _max_tlab_size; }
 100   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
 101 
 102   virtual void collect(GCCause::Cause cause);
 103   virtual void do_full_collection(bool clear_all_soft_refs);
 104 
 105   // Heap walking support
  106   virtual void object_iterate(ObjectClosure* cl);
  107 
 108   // Object pinning support: every object is implicitly pinned
 109   virtual bool supports_object_pinning() const           { return true; }
 110   virtual oop pin_object(JavaThread* thread, oop obj)    { return obj; }
 111   virtual void unpin_object(JavaThread* thread, oop obj) { }
 112 
 113   // No support for block parsing.
 114   HeapWord* block_start(const void* addr) const { return NULL;  }
 115   bool block_is_obj(const HeapWord* addr) const { return false; }
 116 
 117   // No GC threads
 118   virtual void print_gc_threads_on(outputStream* st) const {}
 119   virtual void gc_threads_do(ThreadClosure* tc) const {}
 120 
 121   // No nmethod handling
 122   virtual void register_nmethod(nmethod* nm) {}
 123   virtual void unregister_nmethod(nmethod* nm) {}
 124   virtual void flush_nmethod(nmethod* nm) {}
 125   virtual void verify_nmethod(nmethod* nm) {}
 126 
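
For context, the sketch below shows one way the remaining object_iterate() declaration could be backed in epsilonHeap.cpp. It assumes Epsilon keeps every object in its single contiguous space (a ContiguousSpace* field, here called _space, as in the mainline sources); the delegation shown is an illustration under that assumption, not part of this webrev.

// Hypothetical sketch (not part of this change): with safe_object_iterate()
// gone, the lone object_iterate() entry point can walk the one contiguous
// space Epsilon allocates from. Assumes a ContiguousSpace* _space field, as
// in the mainline EpsilonHeap.
#include "gc/epsilon/epsilonHeap.hpp"
#include "memory/iterator.hpp"

void EpsilonHeap::object_iterate(ObjectClosure* cl) {
  // Every allocated object lies between bottom() and top() of the single
  // space, so one linear walk visits each object exactly once.
  _space->object_iterate(cl);
}

A caller passes an ObjectClosure subclass whose do_object(oop) method is then invoked once per heap object.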

