/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_SPACE_HPP
#define SHARE_VM_MEMORY_SPACE_HPP

#include "memory/allocation.hpp"
#include "memory/blockOffsetTable.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "memory/watermark.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/macros.hpp"
#include "utilities/workgroup.hpp"

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Here's the Space hierarchy:
//
// - Space               -- an abstract base class describing a heap area
//   - CompactibleSpace  -- a space supporting compaction
//     - CompactibleFreeListSpace -- (used for CMS generation)
//     - ContiguousSpace -- a compactible space in which all free space
//                          is contiguous
//       - EdenSpace     -- contiguous space used as nursery
//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
//       - OffsetTableContigSpace -- contiguous space with a block offset array
//                          that allows "fast" block_start calls
//         - TenuredSpace -- (used for TenuredGeneration)

// Forward decls.
class Space;
class ContiguousSpace;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class GenRemSet;
class CardTableRS;
class DirtyCardToOopClosure;

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.
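//
// Schematically (an illustrative picture of the invariant, not part of the
// original header):
//
//   bottom()               top()                 end()
//     |  allocated objects   |     free space      |
//     +----------------------+---------------------+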

class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  MemRegionClosure* _preconsumptionDirtyCardClosure;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const  { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }
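
  // Illustrative sketch (an assumption-laden example, not HotSpot code):
  // after the enclosing generation calls save_marks(), allocations land at
  // or above the saved mark, so they report true here. Assuming a
  // ContiguousSpace* cs and a word_size small enough to fit:
  //
  //   cs->set_saved_mark();                      // record the current top()
  //   oop obj = (oop) cs->allocate(word_size);   // allocated above the mark
  //   assert(cs->obj_allocated_since_save_marks(obj), "new since save_marks");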

  MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return _preconsumptionDirtyCardClosure;
  }
  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
    _preconsumptionDirtyCardClosure = cl;
  }

  // Returns a subregion of the space containing only the allocated objects in
  // the space.
  virtual MemRegion used_region() const = 0;

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks".  If the space
  // initializes its DirtyCardToOopClosures specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the MemRegion will be from the bottom of the region to the
  // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save_marks.
  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose.  The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs.  Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GC's.
  // Default implementation does nothing. We also call this when expanding
  // a space to satisfy an allocation request. See bug #4668531
  virtual void mangle_unused_area() {}
  virtual void mangle_unused_area_complete() {}
  virtual void mangle_region(MemRegion mr) {}

  // Testers
  bool is_empty() const              { return used() == 0; }
  bool not_empty() const             { return used() > 0; }

  // Returns true iff this space contains the given address as
  // part of an allocated object. For certain kinds of spaces,
  // this might be a potentially expensive operation. To prevent
  // performance problems on account of its inadvertent use in
  // product JVMs, we restrict its use to assertion checks only.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }

  // Returns true iff the reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ((intptr_t)p & (sizeof(double)-1)) == 0;
  }
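
  // For instance (an illustrative note): with sizeof(double) == 8 the mask
  // tests the low three address bits, so a pointer like 0x1000 is aligned
  // while 0x1004 is not.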

  // Size computations.  Sizes are in bytes.
  size_t capacity()     const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each.  Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each.  Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary = NULL);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p".  We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object.  If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  inline virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;
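
  // Together these let a space be walked block by block; an illustrative
  // sketch (not HotSpot code), assuming a fully-parsable Space* sp:
  //
  //   HeapWord* cur = sp->bottom();
  //   HeapWord* limit = sp->used_region().end();
  //   while (cur < limit) {
  //     if (sp->block_is_obj(cur)) {
  //       // cur is the head of an object; e.g. apply a closure to it
  //     }
  //     cur += sp->block_size(cur);   // step to the next block
  //   }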

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full).  Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full).  Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;
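
  // Illustrative contrast (a sketch, not HotSpot code): allocate() is for
  // callers that already hold the allocation lock, while par_allocate() may
  // be called by racing threads and simply returns NULL on failure:
  //
  //   HeapWord* mem = sp->par_allocate(word_size);  // thread-safe fast path
  //   if (mem == NULL) {
  //     // full or contended: the caller takes a slow path, e.g. expanding
  //     // the space or triggering a collection, and may then retry
  //   }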

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers();

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;


  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify() const = 0;
};

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousSpaceDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
protected:
  ExtendedOopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again.  NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

public:
  DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _cl(cl), _sp(sp), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
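
// Typical flow (an illustrative sketch; the local names are assumptions):
// remembered-set scanning asks the space for the right closure flavor and
// then feeds it each dirty region found in the card table:
//
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(oop_cl, CardTableModRefBS::Precise, /*boundary*/ NULL);
//   dcto_cl->do_MemRegion(dirty_mr);   // applies oop_cl to refs in dirty_mr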

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;

  CompactPoint(Generation* g = NULL) :
    gen(g), space(NULL), threshold(0) {}
};

// A space that supports compaction operations.  This is usually, but not
// necessarily, a space that is normally contiguous.  But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's.

class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

public:
  CompactibleSpace() :
   _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top;    }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
      "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() = 0;

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order.  Also is used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers.  The fields
  // "cp->gen" and "cp->space" are the generation and space into which we
  // are currently compacting.  This call updates "cp" as necessary, and
  // leaves the "compaction_top" of the final "cp->space" up-to-date.
  // Offset tables may be updated in this phase as if the final copy had
  // occurred; if so, "cp->threshold" indicates when the next such action
  // should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp);
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();
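
  // How these hooks line up with the mark-sweep-compact phases, as an
  // illustrative sketch (the loop structure is an assumption, not verbatim
  // collector code):
  //
  //   CompactPoint cp(gen);
  //   for (CompactibleSpace* s = first; s != NULL;
  //        s = s->next_compaction_space()) {
  //     s->prepare_for_compaction(&cp);   // phase 2: forwarding pointers
  //   }
  //   // phase 3: adjust_pointers() on each space rewrites all references
  //   // phase 4: compact() on each space slides objects to their new homes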

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; }

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary.  This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

 410   // "q" is an object of the given "size" that should be forwarded;
 411   // "cp" names the generation ("gen") and containing "this" (which must
 412   // also equal "cp->space").  "compact_top" is where in "this" the
 413   // next object should be forwarded to.  If there is room in "this" for
 414   // the object, insert an appropriate forwarding pointer in "q".
 415   // If not, go to the next compaction space (there must
 416   // be one, since compaction must succeed -- we go to the first space of
 417   // the previous generation if necessary, updating "cp"), reset compact_top
 418   // and then forward.  In either case, returns the new value of "compact_top".
 419   // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
 420   // function of the then-current compaction space, and updates "cp->threshold
 421   // accordingly".
 422   virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
 423                     HeapWord* compact_top);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return 0; }

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; it returns the next
  // threshold.  (The default implementation merely returns the end of
  // the space, so the threshold is never crossed again.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Requires "allowed_deadspace_words > 0", that "q" is the start of a
  // free block of the given "word_len", and that "q", were it an object,
  // would not move if forwarded.  If the size allows, fill the free
  // block with an object, to prevent excessive compaction.  Returns "true"
  // iff the free region was made deadspace, and modifies
  // "allowed_deadspace_words" to reflect the number of available deadspace
  // words remaining after this operation.
  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                        size_t word_len);
};

class GenSpaceMangler;

// A space in which the free area is contiguous.  It therefore supports
// faster allocation, and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class OneContigSpaceCardGeneration;
  friend class VMStructs;
 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const            { return _top;    }
  void set_top(HeapWord* value)    { _top = value; }

  void set_saved_mark()            { _saved_mark_word = top();    }
  void reset_saved_mark()          { _saved_mark_word = bottom(); }

  WaterMark bottom_mark()     { return WaterMark(this, bottom()); }
  WaterMark top_mark()        { return WaterMark(this, top()); }
  WaterMark saved_mark()      { return WaterMark(this, saved_mark_word()); }
  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode mangle (write it with a particular bit
  // pattern) the unused part of a space.

  // Used to save an address in the space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;
  // Mangle the given MemRegion.
  void mangle_region(MemRegion mr) PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This code may be NULL depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const        { return byte_size(bottom(), end()); }
  size_t used() const            { return byte_size(bottom(), top()); }
  size_t free() const            { return byte_size(top(),    end()); }

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);
  HeapWord* allocate_aligned(size_t word_size);

  // Iteration
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signaled early termination.
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  HeapWord* concurrent_iteration_safe_limit() {
    assert(_concurrent_iteration_safe_limit <= top(),
           "_concurrent_iteration_safe_limit update missed");
    return _concurrent_iteration_safe_limit;
  }
  // Changes the safe limit. All objects from bottom() up to the new
  // limit must be properly initialized.
  void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
    assert(new_limit <= top(), "uninitialized objects in the safe range");
    _concurrent_iteration_safe_limit = new_limit;
  }


#if INCLUDE_ALL_GCS
  // In support of parallel oop_iterate.
  #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void par_oop_iterate(MemRegion mr, OopClosureType* blk);

    ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
  #undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // INCLUDE_ALL_GCS

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary = NULL);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object.  Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
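  // An illustrative sketch of that linear scan (assumptions: the space is
  // parsable and "p" lies within used_region()):
  //
  //   HeapWord* cur = bottom();
  //   while (cur + block_size(cur) <= (HeapWord*)p) {
  //     cur += block_size(cur);    // walk block by block from the bottom
  //   }
  //   return cur;                  // first block whose extent covers p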
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  // PrintHeapAtGC support.
  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify() const;

  // Used to increase collection frequency.  "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);

};


// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class Filtering_DCTOC : public DirtyCardToOopClosure {
protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

public:
  Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a Filtering_DCTOC, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public Filtering_DCTOC {
protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary)
  {}
};


// Class EdenSpace describes the eden space in the new (young) generation.

class DefNewGeneration;

class EdenSpace : public ContiguousSpace {
  friend class VMStructs;
 private:
  DefNewGeneration* _gen;

  // _soft_end is used as a soft limit on allocation.  As soft limits are
  // reached, the slow-path allocation code can invoke other actions and then
  // adjust _soft_end up to a new soft limit or to end().
  HeapWord* _soft_end;

 public:
  EdenSpace(DefNewGeneration* gen) :
   _gen(gen), _soft_end(NULL) {}

  // Get/set just the 'soft' limit.
  HeapWord* soft_end()               { return _soft_end; }
  HeapWord** soft_end_addr()         { return &_soft_end; }
  void set_soft_end(HeapWord* value) { _soft_end = value; }

  // Override.
  void clear(bool mangle_space);

  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
  void set_end(HeapWord* value) {
    set_soft_end(value);
    ContiguousSpace::set_end(value);
  }

  // Allocation (return NULL if full)
  HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);
};
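
// An illustrative sketch of the soft-limit protocol (not verbatim HotSpot
// code): the fast path bounds allocation by soft_end() rather than end(), so
// the slow path regains control periodically even while eden still has room:
//
//   HeapWord* obj = top();
//   if (obj + word_size <= soft_end()) {
//     set_top(obj + word_size);    // fits below the soft limit
//     return obj;
//   }
//   return NULL;   // caller may raise _soft_end toward end() and retry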

// Class ConcEdenSpace extends EdenSpace for the sake of safe
// allocation while the soft end is being modified concurrently.

class ConcEdenSpace : public EdenSpace {
 public:
  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }

  // Allocation (return NULL if full)
  HeapWord* par_allocate(size_t word_size);
};


// A ContiguousSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.)  This is the abstract base class for old generation
// (tenured) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify() const;
};
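
// Why block_start is fast here, as an illustrative sketch (the exact update
// below is an assumption about the implementation file, not part of this
// header): allocate() records each new block in the offset table, so lookups
// need not scan from bottom():
//
//   HeapWord* res = ContiguousSpace::allocate(word_size);
//   if (res != NULL) {
//     _offsets.alloc_block(res, res + word_size);   // note block in the BOT
//   }
//
// block_start_const(p) can then consult _offsets to jump close to the
// covering block in near-constant time instead of walking from bottom().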


// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};
#endif // SHARE_VM_MEMORY_SPACE_HPP