/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_SPACE_HPP
#define SHARE_VM_GC_SHARED_SPACE_HPP

#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Forward decls.
class Space;
class ContiguousSpace;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class CardTableRS;
class DirtyCardToOopClosure;

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space supports allocation, size computation, and GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.

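// As an illustration only (not from the original sources), the invariant
// above can be pictured like this for a contiguous space:
//
//   bottom()             top()               end()
//      |                   |                   |
//      v                   v                   v
//      +-------------------+-------------------+
//      | allocated objects |    free space     |
//      +-------------------+-------------------+
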
class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const  { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return NULL;
  }

  // Returns a subregion of the space containing only the allocated objects in
  // the space.
  virtual MemRegion used_region() const = 0;

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks".  If the space
  // initializes its DirtyCardToOopClosures specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the MemRegion will be from the bottom of the region to the
  // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose.  The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs.  Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GCs.
  // We also call this when expanding a space to satisfy an allocation
  // request. See bug #4668531
  virtual void mangle_unused_area() = 0;
  virtual void mangle_unused_area_complete() = 0;

  // Testers
  bool is_empty() const              { return used() == 0; }
  bool not_empty() const             { return used() > 0; }

  // Returns true iff the space contains the given address as part
  // of an allocated object. For certain kinds of spaces, this might
  // be a potentially expensive operation. To prevent performance
  // problems on account of its inadvertent use in product JVMs,
  // we restrict its use to assertion checks only.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }

  // Returns true iff the reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ::is_aligned(p, sizeof(double));
  }

  // Size computations.  Sizes are in bytes.
  size_t capacity()     const { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each.  Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each.  Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                             CardTableModRefBS::PrecisionStyle precision,
                                             HeapWord* boundary,
                                             bool parallel);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p".  We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object.  If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;

  // Allocation (return NULL if full).  Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full).  Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;

  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers() = 0;

  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;


  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify() const = 0;
};
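
// A minimal usage sketch, not part of the original sources: object_iterate()
// applies an ObjectClosure's do_object() to every object in the space. The
// closure below is hypothetical and for illustration only.
//
//   class CountLiveWordsClosure : public ObjectClosure {
//     size_t _words;
//    public:
//     CountLiveWordsClosure() : _words(0) { }
//     void do_object(oop obj)  { _words += obj->size(); }  // size in HeapWords
//     size_t words() const     { return _words; }
//   };
//
//   CountLiveWordsClosure cl;
//   space->object_iterate(&cl);  // "space" is any concrete Space subclass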

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousSpaceDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
protected:
  ExtendedOopClosure* _cl;
  Space* _sp;
  CardTableModRefBS::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again.  NULL means infinity.)
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

public:
  DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
    _cl(cl), _sp(sp), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
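
// A minimal usage sketch, not part of the original sources: remembered-set
// scanning typically obtains a closure from the space via new_dcto_cl() and
// feeds it the MemRegions covering dirty cards. "sp", "oop_cl", and "mr" are
// assumed to be supplied by the caller.
//
//   DirtyCardToOopClosure* dcto_cl =
//       sp->new_dcto_cl(oop_cl,
//                       CardTableModRefBS::ObjHeadPreciseArray,
//                       NULL /* boundary */,
//                       false /* parallel */);
//   dcto_cl->do_MemRegion(mr);  // apply oop_cl to refs found on dirty cards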

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;

  CompactPoint(Generation* g = NULL) :
    gen(g), space(NULL), threshold(NULL) {}
};

// A space that supports compaction operations.  This is usually, but not
// necessarily, a space that is normally contiguous.  But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GCs.
//
// The compaction operations are implemented by the
// scan_and_{adjust_pointers,compact,forward} function templates.
// The following non-virtual auxiliary functions are used by these function templates:
// - scan_limit()
// - scanned_block_is_obj()
// - scanned_block_size()
// - adjust_obj_size()
// - obj_size()
// These functions are to be used exclusively by the scan_and_* function templates,
// and must be defined for all (non-abstract) subclasses of CompactibleSpace.
//
// NOTE: Any subclass of CompactibleSpace wanting to change/define the behavior
// of any of the auxiliary functions must also override the corresponding
// prepare_for_compaction/adjust_pointers/compact functions using them.
// Otherwise, such changes will not be used by, and have no effect on, the
// compaction operations.
//
// This translates to the following dependencies:
// Overrides/definitions of
//  - scan_limit
//  - scanned_block_is_obj
//  - scanned_block_size
// require override/definition of prepare_for_compaction().
// Similar dependencies exist between
//  - adjust_obj_size  and adjust_pointers()
//  - obj_size         and compact().
//
// Additionally, this also means that changes to block_size() or block_is_obj() that
// should be effective during the compaction operations must provide a corresponding
// definition of scanned_block_size/scanned_block_is_obj respectively.
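//
// As a minimal illustration, not from the original sources, a subclass
// honoring these dependencies would have roughly the following shape;
// every name below that is not a CompactibleSpace member is hypothetical:
//
//   class MyCompactibleSpace : public CompactibleSpace {
//    private:
//     // Statically bound auxiliary functions used by scan_and_forward().
//     HeapWord* scan_limit() const { return my_end_of_live(); }
//     bool scanned_block_is_obj(const HeapWord* addr) const {
//       return block_is_obj(addr);
//     }
//     size_t scanned_block_size(const HeapWord* addr) const {
//       return block_size(addr);
//     }
//    public:
//     // Required so the definitions above are actually picked up by the
//     // scan_and_* templates.
//     virtual void prepare_for_compaction(CompactPoint* cp) {
//       scan_and_forward(this, cp);
//     }
//   };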
class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
  inline size_t adjust_obj_size(size_t size) const {
    return size;
  }

  inline size_t obj_size(const HeapWord* addr) const;

  template <class SpaceType>
  static inline void verify_up_to_first_dead(SpaceType* space) NOT_DEBUG_RETURN;

  template <class SpaceType>
  static inline void clear_empty_region(SpaceType* space);

public:
  CompactibleSpace() :
   _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top;    }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
      "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() = 0;

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order.  Also is used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers.  The fields
  // "cp->gen" and "cp->space" are the generation and space into
  // which we are currently compacting.  This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final "cp->space"
  // up-to-date.  Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp) = 0;
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; }

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary.  This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") containing "this" (which must
  // also equal "cp->space").  "compact_top" is where in "this" the
  // next object should be forwarded to.  If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward.  In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                    HeapWord* compact_top);
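
  // A simplified paraphrase, not from the original sources, of how forward()
  // is driven during phase2: scan_and_forward() walks the blocks of the space
  // and forwards each live object in address order. (The real template also
  // handles dead stretches and the "deadwood" allowance.)
  //
  //   HeapWord* compact_top = cp->space->compaction_top();
  //   HeapWord* cur = space->bottom();
  //   while (cur < space->scan_limit()) {
  //     size_t size = space->scanned_block_size(cur);
  //     if (space->scanned_block_is_obj(cur) && oop(cur)->is_gc_marked()) {
  //       compact_top = space->forward(oop(cur), size, cp, compact_top);
  //     }
  //     cur += size;
  //   }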

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return 0; }

  // This function is invoked when an allocation of an object covering
  // "start" to "the_end" crosses the threshold; it returns the next
  // threshold.  (This default implementation does nothing beyond
  // returning the end of the space.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Below are template functions for scan_and_* algorithms (avoiding virtual calls).
  // The space argument should be a subclass of CompactibleSpace, implementing
  // scan_limit(), scanned_block_is_obj(), and scanned_block_size(),
  // and possibly also overriding obj_size() and adjust_obj_size().
  // These functions should avoid virtual calls whenever possible.

  // Frequently calls adjust_obj_size().
  template <class SpaceType>
  static inline void scan_and_adjust_pointers(SpaceType* space);

  // Frequently calls obj_size().
  template <class SpaceType>
  static inline void scan_and_compact(SpaceType* space);

  // Frequently calls scanned_block_is_obj() and scanned_block_size().
  // Requires the scan_limit() function.
  template <class SpaceType>
  static inline void scan_and_forward(SpaceType* space, CompactPoint* cp);
};

class GenSpaceMangler;

// A space in which the free area is contiguous.  It therefore supports
// faster allocation and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class VMStructs;
  // Allow the scan_and_forward function to call (private) overrides of the
  // auxiliary functions on this class.
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);

 private:
  // Auxiliary functions for scan_and_forward support.
  // See comments for CompactibleSpace for more information.
  inline HeapWord* scan_limit() const {
    return top();
  }

  inline bool scanned_block_is_obj(const HeapWord* addr) const {
    return true; // Always true, since scan_limit is top
  }

  inline size_t scanned_block_size(const HeapWord* addr) const;

 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size);
  inline HeapWord* par_allocate_impl(size_t word_size);
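
  // A minimal sketch, not from the original sources, of the lock-free
  // bump-pointer allocation that par_allocate_impl() conventionally performs
  // (the real definition lives in space.inline.hpp): retry a compare-and-swap
  // on _top until the claim succeeds or the space is full.
  //
  //   HeapWord* par_allocate_impl(size_t word_size) {
  //     do {
  //       HeapWord* obj = top();
  //       if (pointer_delta(end(), obj) < word_size) {
  //         return NULL;  // Not enough room left in the space.
  //       }
  //       HeapWord* new_top = obj + word_size;
  //       HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
  //       if (result == obj) {
  //         return obj;   // This thread claimed [obj, new_top).
  //       }
  //       // Lost the race; another thread moved _top. Retry.
  //     } while (true);
  //   }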

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const            { return _top;    }
  void set_top(HeapWord* value)    { _top = value; }

  void set_saved_mark()            { _saved_mark_word = top();    }
  void reset_saved_mark()          { _saved_mark_word = bottom(); }

  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode mangle (write it with a particular bit
  // pattern) the unused part of a space.

  // Used to save an address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This check may be a no-op, depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const        { return byte_size(bottom(), end()); }
  size_t used() const            { return byte_size(bottom(), top()); }
  size_t free() const            { return byte_size(top(),    end()); }

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);
  HeapWord* allocate_aligned(size_t word_size);

  // Iteration
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signaled early termination.
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  HeapWord* concurrent_iteration_safe_limit() {
    assert(_concurrent_iteration_safe_limit <= top(),
           "_concurrent_iteration_safe_limit update missed");
    return _concurrent_iteration_safe_limit;
  }
  // Changes the safe limit. All objects from bottom() to the new
  // limit must be properly initialized.
  void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
    assert(new_limit <= top(), "uninitialized objects in the safe range");
    _concurrent_iteration_safe_limit = new_limit;
  }


#if INCLUDE_ALL_GCS
  // In support of parallel oop_iterate.
  #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void par_oop_iterate(MemRegion mr, OopClosureType* blk);

    ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
  #undef ContigSpace_PAR_OOP_ITERATE_DECL
#endif // INCLUDE_ALL_GCS

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary,
                                     bool parallel);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);

  ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object.  Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);

  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify() const;

  // Used to increase collection frequency.  "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);
};


// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class FilteringDCTOC : public DirtyCardToOopClosure {
protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

public:
  FilteringDCTOC(Space* sp, ExtendedOopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a FilteringDCTOC, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public FilteringDCTOC {
protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       ExtendedOopClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
                       CardTableModRefBS::PrecisionStyle precision,
                       HeapWord* boundary) :
    FilteringDCTOC(sp, cl, precision, boundary)
  {}
};

// A ContiguousSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces).  This is the abstract base class for old generation
// (tenured) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify() const;
};


// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};
#endif // SHARE_VM_GC_SHARED_SPACE_HPP