< prev index next >

src/share/vm/gc/shared/space.cpp

Print this page
rev 12906 : [mq]: gc_interface — this webrev shows the old version (using CardTableModRefBS) followed by the new version (using CardTable); the two copies are otherwise identical.


  31 #include "gc/shared/genCollectedHeap.hpp"
  32 #include "gc/shared/genOopClosures.inline.hpp"
  33 #include "gc/shared/space.hpp"
  34 #include "gc/shared/space.inline.hpp"
  35 #include "gc/shared/spaceDecorator.hpp"
  36 #include "memory/universe.inline.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "runtime/atomic.hpp"
  39 #include "runtime/java.hpp"
  40 #include "runtime/orderAccess.inline.hpp"
  41 #include "runtime/prefetch.inline.hpp"
  42 #include "runtime/safepoint.hpp"
  43 #include "utilities/copy.hpp"
  44 #include "utilities/globalDefinitions.hpp"
  45 #include "utilities/macros.hpp"
  46 
  47 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
  48                                                 HeapWord* top_obj) {
  49   if (top_obj != NULL) {
  50     if (_sp->block_is_obj(top_obj)) {
  51       if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
  52         if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
  53           // An arrayOop is starting on the dirty card - since we do exact
  54           // store checks for objArrays we are done.
  55         } else {
  56           // Otherwise, it is possible that the object starting on the dirty
  57           // card spans the entire card, and that the store happened on a
  58           // later card.  Figure out where the object ends.
  59           // Use the block_size() method of the space over which
  60           // the iteration is being done.  That space (e.g. CMS) may have
  61           // specific requirements on object sizes which will
  62           // be reflected in the block_size() method.
  63           top = top_obj + oop(top_obj)->size();
  64         }
  65       }
  66     } else {
  67       top = top_obj;
  68     }
  69   } else {
  70     assert(top == _sp->end(), "only case where top_obj == NULL");
  71   }


 107 // or planning to scan.
// Apply the wrapped oop closure (_cl) to the objects intersecting the
// dirty-card region mr. The region is first snapped to object boundaries
// via block_start()/get_actual_top(); _min_done tracks the lowest address
// already processed so that successive calls (which the assert below
// requires to arrive in decreasing address order, unless the closure is
// idempotent) do not rescan the same words.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();     // last word IN the region (not one past)
  HeapWord* top = mr.end();       // one past the last word
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  // _last_bottom is debug-only state used by the assert above.
  NOT_PRODUCT(_last_bottom = mr.start());

  // Starts of the objects (blocks) containing the first and last words
  // of the region; these may lie below mr itself.
  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}
 180 
 181 DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
 182                                           CardTableModRefBS::PrecisionStyle precision,
 183                                           HeapWord* boundary,
 184                                           bool parallel) {
 185   return new DirtyCardToOopClosure(this, cl, precision, boundary);
 186 }
 187 
 188 HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
 189                                                HeapWord* top_obj) {
 190   if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
 191     if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
 192       if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
 193         // An arrayOop is starting on the dirty card - since we do exact
 194         // store checks for objArrays we are done.
 195       } else {
 196         // Otherwise, it is possible that the object starting on the dirty
 197         // card spans the entire card, and that the store happened on a
 198         // later card.  Figure out where the object ends.
 199         assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
 200           "Block size and object size mismatch");
 201         top = top_obj + oop(top_obj)->size();
 202       }
 203     }
 204   } else {
 205     top = (_sp->toContiguousSpace())->top();
 206   }
 207   return top;
 208 }
 209 
 210 void FilteringDCTOC::walk_mem_region(MemRegion mr,
 211                                      HeapWord* bottom,


 242     while (next_obj < top) {                                            \
 243       /* Bottom lies entirely below top, so we can call the */          \
 244       /* non-memRegion version of oop_iterate below. */                 \
 245       oop(bottom)->oop_iterate(cl);                                     \
 246       bottom = next_obj;                                                \
 247       next_obj = bottom + oop(bottom)->size();                          \
 248     }                                                                   \
 249     /* Last object. */                                                  \
 250     oop(bottom)->oop_iterate(cl, mr);                                   \
 251   }                                                                     \
 252 }
 253 
 254 // (There are only two of these, rather than N, because the split is due
 255 // only to the introduction of the FilteringClosure, a local part of the
 256 // impl of this abstraction.)
// Instantiate walk_mem_region_with_cl for the two closure types used here
// (macro defined above; its full body is not shown in this chunk).
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 259 
 260 DirtyCardToOopClosure*
 261 ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
 262                              CardTableModRefBS::PrecisionStyle precision,
 263                              HeapWord* boundary,
 264                              bool parallel) {
 265   return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
 266 }
 267 
 268 void Space::initialize(MemRegion mr,
 269                        bool clear_space,
 270                        bool mangle_space) {
 271   HeapWord* bottom = mr.start();
 272   HeapWord* end    = mr.end();
 273   assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
 274          "invalid space boundaries");
 275   set_bottom(bottom);
 276   set_end(end);
 277   if (clear_space) clear(mangle_space);
 278 }
 279 
 280 void Space::clear(bool mangle_space) {
 281   if (ZapUnusedHeapArea && mangle_space) {
 282     mangle_unused_area();




  31 #include "gc/shared/genCollectedHeap.hpp"
  32 #include "gc/shared/genOopClosures.inline.hpp"
  33 #include "gc/shared/space.hpp"
  34 #include "gc/shared/space.inline.hpp"
  35 #include "gc/shared/spaceDecorator.hpp"
  36 #include "memory/universe.inline.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "runtime/atomic.hpp"
  39 #include "runtime/java.hpp"
  40 #include "runtime/orderAccess.inline.hpp"
  41 #include "runtime/prefetch.inline.hpp"
  42 #include "runtime/safepoint.hpp"
  43 #include "utilities/copy.hpp"
  44 #include "utilities/globalDefinitions.hpp"
  45 #include "utilities/macros.hpp"
  46 
  47 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
  48                                                 HeapWord* top_obj) {
  49   if (top_obj != NULL) {
  50     if (_sp->block_is_obj(top_obj)) {
  51       if (_precision == CardTable::ObjHeadPreciseArray) {
  52         if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
  53           // An arrayOop is starting on the dirty card - since we do exact
  54           // store checks for objArrays we are done.
  55         } else {
  56           // Otherwise, it is possible that the object starting on the dirty
  57           // card spans the entire card, and that the store happened on a
  58           // later card.  Figure out where the object ends.
  59           // Use the block_size() method of the space over which
  60           // the iteration is being done.  That space (e.g. CMS) may have
  61           // specific requirements on object sizes which will
  62           // be reflected in the block_size() method.
  63           top = top_obj + oop(top_obj)->size();
  64         }
  65       }
  66     } else {
  67       top = top_obj;
  68     }
  69   } else {
  70     assert(top == _sp->end(), "only case where top_obj == NULL");
  71   }


 107 // or planning to scan.
// Apply the wrapped oop closure (_cl) to the objects intersecting the
// dirty-card region mr. The region is first snapped to object boundaries
// via block_start()/get_actual_top(); _min_done tracks the lowest address
// already processed so that successive calls (which the assert below
// requires to arrive in decreasing address order, unless the closure is
// idempotent) do not rescan the same words.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();     // last word IN the region (not one past)
  HeapWord* top = mr.end();       // one past the last word
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTable::ObjHeadPreciseArray ||
         _precision == CardTable::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTable::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  // _last_bottom is debug-only state used by the assert above.
  NOT_PRODUCT(_last_bottom = mr.start());

  // Starts of the objects (blocks) containing the first and last words
  // of the region; these may lie below mr itself.
  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTable::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTable::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}
 180 
 181 DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
 182                                           CardTable::PrecisionStyle precision,
 183                                           HeapWord* boundary,
 184                                           bool parallel) {
 185   return new DirtyCardToOopClosure(this, cl, precision, boundary);
 186 }
 187 
 188 HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
 189                                                HeapWord* top_obj) {
 190   if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
 191     if (_precision == CardTable::ObjHeadPreciseArray) {
 192       if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
 193         // An arrayOop is starting on the dirty card - since we do exact
 194         // store checks for objArrays we are done.
 195       } else {
 196         // Otherwise, it is possible that the object starting on the dirty
 197         // card spans the entire card, and that the store happened on a
 198         // later card.  Figure out where the object ends.
 199         assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
 200           "Block size and object size mismatch");
 201         top = top_obj + oop(top_obj)->size();
 202       }
 203     }
 204   } else {
 205     top = (_sp->toContiguousSpace())->top();
 206   }
 207   return top;
 208 }
 209 
 210 void FilteringDCTOC::walk_mem_region(MemRegion mr,
 211                                      HeapWord* bottom,


 242     while (next_obj < top) {                                            \
 243       /* Bottom lies entirely below top, so we can call the */          \
 244       /* non-memRegion version of oop_iterate below. */                 \
 245       oop(bottom)->oop_iterate(cl);                                     \
 246       bottom = next_obj;                                                \
 247       next_obj = bottom + oop(bottom)->size();                          \
 248     }                                                                   \
 249     /* Last object. */                                                  \
 250     oop(bottom)->oop_iterate(cl, mr);                                   \
 251   }                                                                     \
 252 }
 253 
 254 // (There are only two of these, rather than N, because the split is due
 255 // only to the introduction of the FilteringClosure, a local part of the
 256 // impl of this abstraction.)
// Instantiate walk_mem_region_with_cl for the two closure types used here
// (macro defined above; its full body is not shown in this chunk).
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 259 
 260 DirtyCardToOopClosure*
 261 ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
 262                              CardTable::PrecisionStyle precision,
 263                              HeapWord* boundary,
 264                              bool parallel) {
 265   return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
 266 }
 267 
 268 void Space::initialize(MemRegion mr,
 269                        bool clear_space,
 270                        bool mangle_space) {
 271   HeapWord* bottom = mr.start();
 272   HeapWord* end    = mr.end();
 273   assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
 274          "invalid space boundaries");
 275   set_bottom(bottom);
 276   set_end(end);
 277   if (clear_space) clear(mangle_space);
 278 }
 279 
 280 void Space::clear(bool mangle_space) {
 281   if (ZapUnusedHeapArea && mangle_space) {
 282     mangle_unused_area();


< prev index next >