src/hotspot/share/gc/shared/barrierSet.hpp

  98   virtual void write_ref_array_pre(oop* dst, int length,
  99                                    bool dest_uninitialized = false) {}
 100   virtual void write_ref_array_pre(narrowOop* dst, int length,
 101                                    bool dest_uninitialized = false) {}
 102   // Below count is the # array elements being written, starting
 103   // at the address "start", which may not necessarily be HeapWord-aligned
 104   inline void write_ref_array(HeapWord* start, size_t count);
 105 
 106   // Static versions, suitable for calling from generated code;
 107   // count is # array elements being written, starting with "start",
 108   // which may not necessarily be HeapWord-aligned.
 109   static void static_write_ref_array_pre(HeapWord* start, size_t count);
 110   static void static_write_ref_array_post(HeapWord* start, size_t count);
 111 
 112   // Support for optimizing compilers to call the barrier set on slow path allocations
 113   // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
 114   // The allocation is safe to use iff it returns true. If not, the slow-path allocation
 115   // is redone until it succeeds. This can e.g. prevent slow-path allocations from
 116   // ending up in the old generation.
 117   virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
 118   virtual void flush_deferred_barriers(JavaThread* thread) {}

 119   virtual void make_parsable(JavaThread* thread) {}
 120 
 121 protected:
 122   virtual void write_ref_array_work(MemRegion mr) = 0;
 123 
 124 public:
 125   // Inform the BarrierSet that the covered heap region that starts
 126   // with "base" has been changed to have the given size (possibly from 0,
 127   // for initialization.)
 128   virtual void resize_covered_region(MemRegion new_region) = 0;
 129 
 130   // If the barrier set imposes any alignment restrictions on boundaries
 131   // within the heap, this function tells whether they are met.
 132   virtual bool is_aligned(HeapWord* addr) = 0;
 133 
 134   // Print a description of the memory for the barrier set
 135   virtual void print_on(outputStream* st) const = 0;
 136 
 137   static void set_bs(BarrierSet* bs) { _bs = bs; }
 138 
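
Note on the write_ref_array_pre()/write_ref_array() declarations (source lines 98-104, identical in both versions shown on this page): count is a number of array elements and start, as the comment says, need not be HeapWord-aligned, since with compressed oops a narrowOop element is narrower than a HeapWord. The stand-alone sketch below shows the kind of conservative widening a barrier set has to apply before handing the written range to write_ref_array_work(). The types and names are simplified stand-ins for illustration, not the JDK's inline implementation.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Simplified stand-ins for HotSpot's HeapWord / MemRegion; illustration only.
typedef uintptr_t Addr;
static const size_t kHeapWordSize = sizeof(void*);   // 8 bytes on a 64-bit VM

struct Region {        // models a MemRegion as a byte range [start, end)
  Addr start;
  Addr end;
};

// Conservatively widen the written element range [start, start + count * elem_size)
// to HeapWord boundaries, so every HeapWord overlapping the write is covered even
// when the destination is not HeapWord-aligned (e.g. narrowOop elements).
static Region widen_to_heap_words(Addr start, size_t count, size_t elem_size) {
  Addr end = start + count * elem_size;
  Region r;
  r.start = start & ~(Addr)(kHeapWordSize - 1);                      // align down
  r.end   = (end + kHeapWordSize - 1) & ~(Addr)(kHeapWordSize - 1);  // align up
  return r;
}

int main() {
  // Hypothetical narrowOop[] store: three 4-byte elements at an address that is
  // 4-byte aligned but not HeapWord-aligned.
  Region r = widen_to_heap_words(0x1004, 3, 4);
  std::printf("region for write_ref_array_work: [0x%zx, 0x%zx)\n",
              (size_t)r.start, (size_t)r.end);   // -> [0x1000, 0x1010)
  return 0;
}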




  98   virtual void write_ref_array_pre(oop* dst, int length,
  99                                    bool dest_uninitialized = false) {}
 100   virtual void write_ref_array_pre(narrowOop* dst, int length,
 101                                    bool dest_uninitialized = false) {}
 102   // Below count is the # array elements being written, starting
 103   // at the address "start", which may not necessarily be HeapWord-aligned
 104   inline void write_ref_array(HeapWord* start, size_t count);
 105 
 106   // Static versions, suitable for calling from generated code;
 107   // count is # array elements being written, starting with "start",
 108   // which may not necessarily be HeapWord-aligned.
 109   static void static_write_ref_array_pre(HeapWord* start, size_t count);
 110   static void static_write_ref_array_post(HeapWord* start, size_t count);
 111 
 112   // Support for optimizing compilers to call the barrier set on slow path allocations
 113   // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
 114   // The allocation is safe to use iff it returns true. If not, the slow-path allocation
 115   // is redone until it succeeds. This can e.g. prevent slow-path allocations from
 116   // ending up in the old generation.
 117   virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {}
 118   virtual void on_thread_create(JavaThread* thread) {}
 119   virtual void on_thread_destroy(JavaThread* thread) {}
 120   virtual void make_parsable(JavaThread* thread) {}
 121 
 122 protected:
 123   virtual void write_ref_array_work(MemRegion mr) = 0;
 124 
 125 public:
 126   // Inform the BarrierSet that the covered heap region that starts
 127   // with "base" has been changed to have the given size (possibly from 0,
 128   // for initialization.)
 129   virtual void resize_covered_region(MemRegion new_region) = 0;
 130 
 131   // If the barrier set imposes any alignment restrictions on boundaries
 132   // within the heap, this function tells whether they are met.
 133   virtual bool is_aligned(HeapWord* addr) = 0;
 134 
 135   // Print a description of the memory for the barrier set
 136   virtual void print_on(outputStream* st) const = 0;
 137 
 138   static void set_bs(BarrierSet* bs) { _bs = bs; }
 139 
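
The substantive change on this page is that flush_deferred_barriers() from the old version is gone and the new version adds the on_thread_create()/on_thread_destroy() hooks (new source lines 118-119), which let a barrier set attach and release per-thread state as a JavaThread comes and goes. A minimal stand-alone sketch of that pattern follows; ThreadModel, BufferingBarrierSet and the buffer are hypothetical illustrations, not HotSpot code.

#include <cstdio>
#include <vector>

// Hypothetical stand-ins; not HotSpot's JavaThread / BarrierSet.
struct ThreadModel {
  void* gc_barrier_data;        // slot a barrier set may use for per-thread state
  ThreadModel() : gc_barrier_data(nullptr) {}
};

struct BarrierSetModel {
  virtual ~BarrierSetModel() {}
  // Mirrors the new hooks: called when a thread is created / destroyed so the
  // barrier set can set up and tear down thread-local barrier state.
  virtual void on_thread_create(ThreadModel* t)  {}
  virtual void on_thread_destroy(ThreadModel* t) {}
};

// Example subclass: keeps a per-thread buffer of pending barrier work, in the
// spirit of a SATB or card enqueue buffer.
struct BufferingBarrierSet : public BarrierSetModel {
  virtual void on_thread_create(ThreadModel* t) {
    t->gc_barrier_data = new std::vector<void*>();     // allocate per-thread buffer
    std::puts("barrier set: per-thread buffer created");
  }
  virtual void on_thread_destroy(ThreadModel* t) {
    delete static_cast<std::vector<void*>*>(t->gc_barrier_data);
    t->gc_barrier_data = nullptr;
    std::puts("barrier set: per-thread buffer released");
  }
};

int main() {
  BufferingBarrierSet bs;
  ThreadModel t;
  bs.on_thread_create(&t);     // would be driven from JavaThread construction
  bs.on_thread_destroy(&t);    // ...and from JavaThread destruction
  return 0;
}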

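on_slowpath_allocation_exit() (source line 117 in both versions) lets the barrier set see an object that was allocated on the compiler slow path without entering a TLAB. Per the comment, this supports ReduceInitialCardMarks, where compiled code elides the initial card marks for a freshly allocated object. The simplified, stand-alone sketch below shows the idea for a card-marking barrier: the hook makes sure the cards covering the new object end up dirty. The real code may defer the mark per thread instead; the card size, card values and names here are illustrative only.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustration only: a tiny card-table model and the slow-path allocation hook,
// in the spirit of ReduceInitialCardMarks (not the JDK's card-table code).
static const size_t kCardShift = 9;        // 512-byte cards, the usual HotSpot default
static unsigned char card_table[1 << 10];  // covers a small pretend heap

static void dirty_cards(uintptr_t obj, size_t size_in_bytes) {
  size_t first = (size_t)(obj >> kCardShift);
  size_t last  = (size_t)((obj + size_in_bytes - 1) >> kCardShift);
  for (size_t c = first; c <= last; c++) {
    card_table[c] = 0;                     // 0 == dirty in this model
  }
}

// Models on_slowpath_allocation_exit(JavaThread*, oop): compiled code elided the
// initial card marks for the new object, so the hook performs them here when the
// object came from the slow path and may sit outside the young generation.
static void on_slowpath_allocation_exit(uintptr_t new_obj, size_t size_in_bytes) {
  dirty_cards(new_obj, size_in_bytes);
}

int main() {
  std::memset(card_table, 0xff, sizeof(card_table));   // 0xff == clean in this model
  uintptr_t obj = 0x3f0;                               // pretend object crossing a card boundary
  on_slowpath_allocation_exit(obj, 64);                // 64-byte object spans cards 1 and 2
  std::printf("card[1]=%u card[2]=%u\n",
              (unsigned)card_table[1], (unsigned)card_table[2]);   // both dirty now
  return 0;
}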

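The is_aligned() declaration above reports whether the barrier set's alignment restrictions on heap boundaries are met. For a card-table style barrier set the natural restriction is that covered-region boundaries fall on card boundaries; a tiny stand-alone illustration under that assumption (the 512-byte card size is just an assumed constant for the example):

#include <cstdint>
#include <cstdio>

// Illustration only: require heap boundaries to fall on card boundaries.
static const uintptr_t kCardSize = 512;   // assumed card size for this example

static bool is_aligned(uintptr_t addr) {
  return (addr & (kCardSize - 1)) == 0;   // true iff addr is on a card boundary
}

int main() {
  std::printf("%d %d\n", is_aligned(0x10000) ? 1 : 0, is_aligned(0x10010) ? 1 : 0);  // 1 0
  return 0;
}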