
src/share/vm/gc/shared/barrierSet.inline.hpp

Old version:

  56   } else {
  57     write_ref_field_work(field, new_val, release);
  58   }
  59 }
  60 
  61 // count is number of array elements being written
  62 void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
  63   assert(count <= (size_t)max_intx, "count too large");
  64   HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
  65   // In the case of compressed oops, start and end may potentially be misaligned;
  66   // so we need to conservatively align the first downward (this is not
  67   // strictly necessary for current uses, but a case of good hygiene and,
  68   // if you will, aesthetics) and the second upward (this is essential for
  69   // current uses) to a HeapWord boundary, so we mark all cards overlapping
  70   // this write. If this evolves in the future to calling a
  71   // logging barrier of narrow oop granularity, like the pre-barrier for G1
  72   // (mentioned here merely by way of example), we will need to change this
  73   // interface, so it is "exactly precise" (if I may be allowed the adverbial
  74   // redundancy for emphasis) and does not include narrow oop slots not
  75   // included in the original write interval.
  76   HeapWord* aligned_start = align_ptr_down(start, HeapWordSize);
  77   HeapWord* aligned_end   = align_ptr_up  (end,   HeapWordSize);
  78   // If compressed oops were not being used, these should already be aligned
  79   assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
  80          "Expected heap word alignment of start and end");
  81   write_ref_array_work(MemRegion(aligned_start, aligned_end));
  82 }
  83 
  84 
  85 inline void BarrierSet::write_region(MemRegion mr) {
  86   if (devirtualize_reference_writes()) {
  87     barrier_set_cast<CardTableModRefBS>(this)->inline_write_region(mr);
  88   } else {
  89     write_region_work(mr);
  90   }
  91 }
  92 
  93 #endif // SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP

New version:

  56   } else {
  57     write_ref_field_work(field, new_val, release);
  58   }
  59 }
  60 
  61 // count is number of array elements being written
  62 void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
  63   assert(count <= (size_t)max_intx, "count too large");
  64   HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
  65   // In the case of compressed oops, start and end may potentially be misaligned;
  66   // so we need to conservatively align the first downward (this is not
  67   // strictly necessary for current uses, but a case of good hygiene and,
  68   // if you will, aesthetics) and the second upward (this is essential for
  69   // current uses) to a HeapWord boundary, so we mark all cards overlapping
  70   // this write. If this evolves in the future to calling a
  71   // logging barrier of narrow oop granularity, like the pre-barrier for G1
  72   // (mentioned here merely by way of example), we will need to change this
  73   // interface, so it is "exactly precise" (if I may be allowed the adverbial
  74   // redundancy for emphasis) and does not include narrow oop slots not
  75   // included in the original write interval.
  76   HeapWord* aligned_start = align_down(start, HeapWordSize);
  77   HeapWord* aligned_end   = align_up  (end,   HeapWordSize);
  78   // If compressed oops were not being used, these should already be aligned
  79   assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
  80          "Expected heap word alignment of start and end");
  81   write_ref_array_work(MemRegion(aligned_start, aligned_end));
  82 }
  83 
  84 
  85 inline void BarrierSet::write_region(MemRegion mr) {
  86   if (devirtualize_reference_writes()) {
  87     barrier_set_cast<CardTableModRefBS>(this)->inline_write_region(mr);
  88   } else {
  89     write_region_work(mr);
  90   }
  91 }
  92 
  93 #endif // SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
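
A note on the conservative alignment in write_ref_array above: the widening of [start, end) can be sketched in isolation. The align_down_ptr/align_up_ptr helpers below are hypothetical stand-ins that simply round to a power-of-two boundary; they are not the HotSpot align_down/align_up being renamed in this change. The sketch only shows why an interval of narrow-oop slots may need to be widened to HeapWord-aligned bounds before all overlapping cards are marked.

#include <cstddef>
#include <cstdint>

// Hypothetical stand-ins for pointer alignment helpers: round a pointer
// down/up to a multiple of 'alignment' (assumed a power of two), in bytes.
static inline char* align_down_ptr(char* p, size_t alignment) {
  return (char*)((uintptr_t)p & ~(uintptr_t)(alignment - 1));
}
static inline char* align_up_ptr(char* p, size_t alignment) {
  return (char*)(((uintptr_t)p + alignment - 1) & ~(uintptr_t)(alignment - 1));
}

int main() {
  const size_t word_size = 8;                  // HeapWordSize on a 64-bit VM
  const size_t narrow_oop_size = 4;            // heapOopSize with compressed oops
  alignas(8) char heap[64];
  // A narrow-oop array slice starting at an odd 4-byte slot: 'start' is
  // oop-aligned but not word-aligned, so the raw interval straddles words.
  char* start = heap + 4;
  char* end   = start + 3 * narrow_oop_size;   // 3 elements written
  char* aligned_start = align_down_ptr(start, word_size);  // heap + 0
  char* aligned_end   = align_up_ptr  (end,   word_size);  // heap + 16
  // The widened interval [aligned_start, aligned_end) still contains every
  // slot that was written, so marking all cards it overlaps is safe.
  return (aligned_start <= start && end <= aligned_end) ? 0 : 1;
}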
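
A similar note on write_region: the devirtualize_reference_writes() test is a manual-devirtualization pattern. The classes below are my own minimal reconstruction of that shape, not the real BarrierSet/CardTableModRefBS declarations; the assumption here is that a cheap non-virtual tag check lets the fast path cast to the concrete barrier and call a non-virtual, inlinable method, while everything else falls back to the virtual write_region_work hook.

#include <cstdio>

// Minimal sketch of the devirtualization shape, with invented names; not the
// actual HotSpot class hierarchy.
class BarrierSetSketch {
public:
  enum Kind { Generic, CardTable };
  explicit BarrierSetSketch(Kind kind) : _kind(kind) {}
  virtual ~BarrierSetSketch() {}
  // Non-virtual check on a stored tag, cheap enough to inline at call sites.
  bool devirtualize_reference_writes() const { return _kind == CardTable; }
  // Slow path: virtual hook that any barrier can override.
  virtual void write_region_work(const char* what) {
    std::printf("virtual barrier for %s\n", what);
  }
  inline void write_region(const char* what);  // defined after the subclass
private:
  Kind _kind;
};

class CardTableBarrierSketch : public BarrierSetSketch {
public:
  CardTableBarrierSketch() : BarrierSetSketch(CardTable) {}
  // Non-virtual and visible to the compiler, so the common case can inline.
  void inline_write_region(const char* what) {
    std::printf("inlined card-table barrier for %s\n", what);
  }
};

inline void BarrierSetSketch::write_region(const char* what) {
  if (devirtualize_reference_writes()) {
    // Known concrete type: cast and call the non-virtual inline method.
    static_cast<CardTableBarrierSketch*>(this)->inline_write_region(what);
  } else {
    write_region_work(what);                   // fall back to virtual dispatch
  }
}

int main() {
  CardTableBarrierSketch ct;
  BarrierSetSketch* bs = &ct;
  bs->write_region("mr");                      // takes the devirtualized path
  return 0;
}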