1 /*
   2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_MEMORY_SPACE_HPP
  26 #define SHARE_VM_MEMORY_SPACE_HPP
  27 
  28 #include "memory/allocation.hpp"
  29 #include "memory/blockOffsetTable.hpp"
  30 #include "memory/cardTableModRefBS.hpp"
  31 #include "memory/iterator.hpp"
  32 #include "memory/memRegion.hpp"
  33 #include "memory/watermark.hpp"
  34 #include "oops/markOop.hpp"
  35 #include "runtime/mutexLocker.hpp"
  36 #include "runtime/prefetch.hpp"
  37 #include "utilities/workgroup.hpp"
  38 #ifdef TARGET_OS_FAMILY_linux
  39 # include "os_linux.inline.hpp"
  40 #endif
  41 #ifdef TARGET_OS_FAMILY_solaris
  42 # include "os_solaris.inline.hpp"
  43 #endif
  44 #ifdef TARGET_OS_FAMILY_windows
  45 # include "os_windows.inline.hpp"
  46 #endif
  47 
  48 // A space is an abstraction for the "storage units" backing
  49 // up the generation abstraction. It includes specific
  50 // implementations for keeping track of free and used space,
  51 // for iterating over objects and free blocks, etc.
  52 
  53 // Here's the Space hierarchy:
  54 //
  55 // - Space               -- an abstract base class describing a heap area
  56 //   - CompactibleSpace  -- a space supporting compaction
  57 //     - CompactibleFreeListSpace -- (used for CMS generation)
  58 //     - ContiguousSpace -- a compactible space in which all free space
  59 //                          is contiguous
  60 //       - EdenSpace     -- contiguous space used as a nursery
  61 //         - ConcEdenSpace -- contiguous space with 'soft end'-safe allocation
  62 //       - OffsetTableContigSpace -- contiguous space with a block offset array
  63 //                          that allows "fast" block_start calls
  64 //         - TenuredSpace -- (used for TenuredGeneration)
  65 //         - ContigPermSpace -- an offset table contiguous space for perm gen
  66 
  67 // Forward decls.
  68 class Space;
  69 class BlockOffsetArray;
  70 class BlockOffsetArrayContigSpace;
  71 class Generation;
  72 class CompactibleSpace;
  73 class BlockOffsetTable;
  74 class GenRemSet;
  75 class CardTableRS;
  76 class DirtyCardToOopClosure;
  77 
  78 // An oop closure that is circumscribed by a filtering memory region.
  79 class SpaceMemRegionOopsIterClosure: public OopClosure {
  80  private:
  81   OopClosure* _cl;
  82   MemRegion   _mr;
  83  protected:
  84   template <class T> void do_oop_work(T* p) {
  85     if (_mr.contains(p)) {
  86       _cl->do_oop(p);
  87     }
  88   }
  89  public:
  90   SpaceMemRegionOopsIterClosure(OopClosure* cl, MemRegion mr):
  91     _cl(cl), _mr(mr) {}
  92   virtual void do_oop(oop* p);
  93   virtual void do_oop(narrowOop* p);
  94 };
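
// Illustrative sketch (assuming "sp", "cl" and "mr" are supplied by the
// caller): the wrapper above restricts an existing closure to a region, so a
// bounded scan of a whole space might look like
//
//   void scan_bounded(Space* sp, OopClosure* cl, MemRegion mr) {
//     SpaceMemRegionOopsIterClosure bounded(cl, mr);
//     sp->oop_iterate(&bounded);   // only fields whose addresses lie in "mr"
//                                  // reach the underlying closure "cl"
//   }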
  95 
  96 // A Space describes a heap area. Class Space is an abstract
  97 // base class.
  98 //
  99 // A Space supports allocation and size computation, and provides GC support.
 100 //
 101 // Invariant: bottom() and end() are on page_size boundaries and
 102 // bottom() <= top() <= end()
 103 // top() is inclusive and end() is exclusive.
 104 
 105 class Space: public CHeapObj {
 106   friend class VMStructs;
 107  protected:
 108   HeapWord* _bottom;
 109   HeapWord* _end;
 110 
 111   // Used in support of save_marks()
 112   HeapWord* _saved_mark_word;
 113 
 114   MemRegionClosure* _preconsumptionDirtyCardClosure;
 115 
 116   // A sequential tasks done structure. This supports
 117   // parallel GC, where we have threads dynamically
 118   // claiming sub-tasks from a larger parallel task.
 119   SequentialSubTasksDone _par_seq_tasks;
 120 
 121   Space():
 122     _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }
 123 
 124  public:
 125   // Accessors
 126   HeapWord* bottom() const         { return _bottom; }
 127   HeapWord* end() const            { return _end;    }
 128   virtual void set_bottom(HeapWord* value) { _bottom = value; }
 129   virtual void set_end(HeapWord* value)    { _end = value; }
 130 
 131   virtual HeapWord* saved_mark_word() const  { return _saved_mark_word; }
 132 
 133   void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }
 134 
 135   MemRegionClosure* preconsumptionDirtyCardClosure() const {
 136     return _preconsumptionDirtyCardClosure;
 137   }
 138   void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
 139     _preconsumptionDirtyCardClosure = cl;
 140   }
 141 
 142   // Returns a subregion of the space containing all the objects in
 143   // the space.
 144   virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }
 145 
 146   // Returns a region that is guaranteed to contain (at least) all objects
 147   // allocated at the time of the last call to "save_marks".  If the space
 148   // initializes its DirtyCardToOopClosures specifying the "contig" option
 149   // (that is, if the space is contiguous), then this region must contain only
 150   // such objects: the MemRegion will be from the bottom of the region to the
 151   // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
 152   // the space must distinguish between objects in the region allocated before
 153   // and after the call to save marks.
 154   virtual MemRegion used_region_at_save_marks() const {
 155     return MemRegion(bottom(), saved_mark_word());
 156   }
 157 
 158   // Initialization.
 159   // "initialize" should be called once on a space, before it is used for
 160   // any purpose.  The "mr" argument gives the bounds of the space, and
 161   // the "clear_space" argument should be true unless the memory in "mr" is
 162   // known to be zeroed.
 163   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 164 
 165   // The "clear" method must be called on a region that may have
 166   // had allocation performed in it, but is now to be considered empty.
 167   virtual void clear(bool mangle_space);
 168 
 169   // For detecting GC bugs.  Should only be called at GC boundaries, since
 170   // some unused space may be used as scratch space during GCs.
 171   // Default implementation does nothing. We also call this when expanding
 172   // a space to satisfy an allocation request. See bug #4668531.
 173   virtual void mangle_unused_area() {}
 174   virtual void mangle_unused_area_complete() {}
 175   virtual void mangle_region(MemRegion mr) {}
 176 
 177   // Testers
 178   bool is_empty() const              { return used() == 0; }
 179   bool not_empty() const             { return used() > 0; }
 180 
 181   // Returns true iff the space contains the
 182   // given address as part of an allocated object. For
 183   // certain kinds of spaces, this might be a potentially
 184   // expensive operation. To prevent performance problems
 185   // on account of its inadvertent use in product JVMs,
 186   // we restrict its use to assertion checks only.
 187   virtual bool is_in(const void* p) const;
 188 
 189   // Returns true iff the reserved memory of the space contains the
 190   // given address.
 191   bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }
 192 
 193   // Returns true iff the given block is not allocated.
 194   virtual bool is_free_block(const HeapWord* p) const = 0;
 195 
 196   // Test whether p is double-aligned
 197   static bool is_aligned(void* p) {
 198     return ((intptr_t)p & (sizeof(double)-1)) == 0;
 199   }
 200 
 201   // Size computations.  Sizes are in bytes.
 202   size_t capacity()     const { return byte_size(bottom(), end()); }
 203   virtual size_t used() const = 0;
 204   virtual size_t free() const = 0;
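
  // For a ContiguousSpace (defined below), where used() spans [bottom(), top())
  // and free() spans [top(), end()), these satisfy
  // used() + free() == capacity().  A minimal sanity check might read:
  //
  //   void check_sizes(const ContiguousSpace* cs) {
  //     assert(cs->used() + cs->free() == cs->capacity(), "sizes must add up");
  //   }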
 205 
 206   // Iterate over all the ref-containing fields of all objects in the
 207   // space, calling "cl.do_oop" on each.  Fields in objects allocated by
 208   // applications of the closure are not included in the iteration.
 209   virtual void oop_iterate(OopClosure* cl);
 210 
 211   // Same as above, restricted to the intersection of a memory region and
 212   // the space.  Fields in objects allocated by applications of the closure
 213   // are not included in the iteration.
 214   virtual void oop_iterate(MemRegion mr, OopClosure* cl) = 0;
 215 
 216   // Iterate over all objects in the space, calling "cl.do_object" on
 217   // each.  Objects allocated by applications of the closure are not
 218   // included in the iteration.
 219   virtual void object_iterate(ObjectClosure* blk) = 0;
 220   // Similar to object_iterate() except only iterates over
 221   // objects whose internal references point to objects in the space.
 222   virtual void safe_object_iterate(ObjectClosure* blk) = 0;
 223 
 224   // Iterate over all objects that intersect with mr, calling "cl->do_object"
 225   // on each.  There is an exception to this: if this closure has already
 226   // been invoked on an object, it may skip such objects in some cases.  This is
 227   // most likely to happen in an "upwards" (ascending address) iteration of
 228   // MemRegions.
 229   virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
 230 
 231   // Iterate over as many initialized objects in the space as possible,
 232   // calling "cl.do_object_careful" on each. Return NULL if all objects
 233   // in the space (at the start of the iteration) were iterated over.
 234   // Return an address indicating the extent of the iteration in the
 235   // event that the iteration had to return because of finding an
 236   // uninitialized object in the space, or if the closure "cl"
 237   // signalled early termination.
 238   virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
 239   virtual HeapWord* object_iterate_careful_m(MemRegion mr,
 240                                              ObjectClosureCareful* cl);
 241 
 242   // Create and return a new dirty card to oop closure. Can be
 243   // overridden to return the appropriate type of closure
 244   // depending on the type of space in which the closure will
 245   // operate. ResourceArea allocated.
 246   virtual DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
 247                                              CardTableModRefBS::PrecisionStyle precision,
 248                                              HeapWord* boundary = NULL);
 249 
 250   // If "p" is in the space, returns the address of the start of the
 251   // "block" that contains "p".  We say "block" instead of "object" since
 252   // some heaps may not pack objects densely; a chunk may either be an
 253   // object or a non-object.  If "p" is not in the space, return NULL.
 254   virtual HeapWord* block_start_const(const void* p) const = 0;
 255 
 256   // The non-const version may have benevolent side effects on the data
 257   // structure supporting these calls, possibly speeding up future calls.
 258   // The default implementation, however, is simply to call the const
 259   // version.
 260   inline virtual HeapWord* block_start(const void* p);
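
  // A minimal sketch (assuming "addr" is known to lie in the space): together,
  // block_start() and block_is_obj() locate the object, if any, covering an
  // interior address:
  //
  //   oop object_containing(Space* sp, const void* addr) {
  //     HeapWord* start = sp->block_start(addr);
  //     if (start == NULL || !sp->block_is_obj(start)) return NULL;
  //     return oop(start);
  //   }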
 261 
 262   // Requires "addr" to be the start of a chunk, and returns its size.
 263   // "addr + size" is required to be the start of a new chunk, or the end
 264   // of the active area of the heap.
 265   virtual size_t block_size(const HeapWord* addr) const = 0;
 266 
 267   // Requires "addr" to be the start of a block, and returns "TRUE" iff
 268   // the block is an object.
 269   virtual bool block_is_obj(const HeapWord* addr) const = 0;
 270 
 271   // Requires "addr" to be the start of a block, and returns "TRUE" iff
 272   // the block is an object and the object is alive.
 273   virtual bool obj_is_alive(const HeapWord* addr) const;
 274 
 275   // Allocation (return NULL if full).  Assumes the caller has established
 276   // mutually exclusive access to the space.
 277   virtual HeapWord* allocate(size_t word_size) = 0;
 278 
 279   // Allocation (return NULL if full).  Enforces mutual exclusion internally.
 280   virtual HeapWord* par_allocate(size_t word_size) = 0;
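
  // A minimal sketch of the intended usage; the caller picks the entry point
  // according to whether it already holds exclusive access to the space:
  //
  //   HeapWord* mem = sp->par_allocate(word_size);  // safe without a lock
  //   if (mem == NULL) {
  //     // the space is full: the caller must expand, collect, or fail
  //   }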
 281 
 282   // Returns true if this object has been allocated since a
 283   // generation's "save_marks" call.
 284   virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;
 285 
 286   // Mark-sweep-compact support: all spaces can update pointers to objects
 287   // moving as a part of compaction.
 288   virtual void adjust_pointers();
 289 
 290   // PrintHeapAtGC support
 291   virtual void print() const;
 292   virtual void print_on(outputStream* st) const;
 293   virtual void print_short() const;
 294   virtual void print_short_on(outputStream* st) const;
 295 
 296 
 297   // Accessor for parallel sequential tasks.
 298   SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }
 299 
 300   // IF "this" is a ContiguousSpace, return it, else return NULL.
 301   virtual ContiguousSpace* toContiguousSpace() {
 302     return NULL;
 303   }
 304 
 305   // Debugging
 306   virtual void verify(bool allow_dirty) const = 0;
 307 };
 308 
 309 // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
 310 // OopClosure to (the addresses of) all the ref-containing fields that could
 311 // be modified by virtue of the given MemRegion being dirty. (Note that
 312 // because of the imprecise nature of the write barrier, this may iterate
 313 // over oops beyond the region.)
 314 // This base type for dirty card to oop closures handles memory regions
 315 // in non-contiguous spaces with no boundaries, and should be sub-classed
 316 // to support other space types. See ContiguousDCTOC for a sub-class
 317 // that works with ContiguousSpaces.
 318 
 319 class DirtyCardToOopClosure: public MemRegionClosureRO {
 320 protected:
 321   OopClosure* _cl;
 322   Space* _sp;
 323   CardTableModRefBS::PrecisionStyle _precision;
 324   HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
 325                                 // pointing below boundary.
 326   HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
 327                                 // a downwards traversal; this is the
 328                                 // lowest location already done (or,
 329                                 // alternatively, the lowest address that
 330                                 // shouldn't be done again); NULL means infinity.
 331   NOT_PRODUCT(HeapWord* _last_bottom;)
 332   NOT_PRODUCT(HeapWord* _last_explicit_min_done;)
 333 
 334   // Get the actual top of the area on which the closure will
 335   // operate, given where the top is assumed to be (the end of the
 336   // memory region passed to do_MemRegion) and where the object
 337   // at the top is assumed to start. For example, an object may
 338   // start at the top but actually extend past the assumed top,
 339   // in which case the top becomes the end of the object.
 340   virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
 341 
 342   // Walk the given memory region from bottom to (actual) top
 343   // looking for objects and applying the oop closure (_cl) to
 344   // them. The base implementation of this treats the area as
 345   // blocks, where a block may or may not be an object. Sub-
 346   // classes should override this to provide more accurate
 347   // or possibly more efficient walking.
 348   virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);
 349 
 350 public:
 351   DirtyCardToOopClosure(Space* sp, OopClosure* cl,
 352                         CardTableModRefBS::PrecisionStyle precision,
 353                         HeapWord* boundary) :
 354     _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
 355     _min_done(NULL) {
 356     NOT_PRODUCT(_last_bottom = NULL);
 357     NOT_PRODUCT(_last_explicit_min_done = NULL);
 358   }
 359 
 360   void do_MemRegion(MemRegion mr);
 361 
 362   void set_min_done(HeapWord* min_done) {
 363     _min_done = min_done;
 364     NOT_PRODUCT(_last_explicit_min_done = _min_done);
 365   }
 366 #ifndef PRODUCT
 367   void set_last_bottom(HeapWord* last_bottom) {
 368     _last_bottom = last_bottom;
 369   }
 370 #endif
 371 };
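
// Illustrative sketch of how a card-table scan might drive this closure
// (assuming "sp", "cl" and a dirty region "dirty_mr" are supplied by the
// caller; the closure itself is ResourceArea allocated by new_dcto_cl):
//
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(cl, CardTableModRefBS::Precise);
//   dcto_cl->do_MemRegion(dirty_mr);   // applies "cl" to ref fields that may
//                                      // have been dirtied within "dirty_mr"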
 372 
 373 // A structure to represent a point at which objects are being copied
 374 // during compaction.
 375 class CompactPoint : public StackObj {
 376 public:
 377   Generation* gen;
 378   CompactibleSpace* space;
 379   HeapWord* threshold;
 380   CompactPoint(Generation* _gen, CompactibleSpace* _space,
 381                HeapWord* _threshold) :
 382     gen(_gen), space(_space), threshold(_threshold) {}
 383 };
 384 
 385 
 386 // A space that supports compaction operations.  This is usually, but not
 387 // necessarily, a space that is normally contiguous.  But, for example, a
 388 // free-list-based space whose normal collection is a mark-sweep without
 389 // compaction could still support compaction in full GCs.
 390 
 391 class CompactibleSpace: public Space {
 392   friend class VMStructs;
 393   friend class CompactibleFreeListSpace;
 394   friend class CompactingPermGenGen;
 395   friend class CMSPermGenGen;
 396 private:
 397   HeapWord* _compaction_top;
 398   CompactibleSpace* _next_compaction_space;
 399 
 400 public:
 401   CompactibleSpace() :
 402    _compaction_top(NULL), _next_compaction_space(NULL) {}
 403 
 404   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 405   virtual void clear(bool mangle_space);
 406 
 407   // Used temporarily during a compaction phase to hold the value
 408   // top should have when compaction is complete.
 409   HeapWord* compaction_top() const { return _compaction_top;    }
 410 
 411   void set_compaction_top(HeapWord* value) {
 412     assert(value == NULL || (value >= bottom() && value <= end()),
 413       "should point inside space");
 414     _compaction_top = value;
 415   }
 416 
 417   // Perform operations on the space needed after a compaction
 418   // has been performed.
 419   virtual void reset_after_compaction() {}
 420 
 421   // Returns the next space (in the current generation) to be compacted in
 422   // the global compaction order.  Also is used to select the next
 423   // space into which to compact.
 424 
 425   virtual CompactibleSpace* next_compaction_space() const {
 426     return _next_compaction_space;
 427   }
 428 
 429   void set_next_compaction_space(CompactibleSpace* csp) {
 430     _next_compaction_space = csp;
 431   }
 432 
 433   // MarkSweep support phase2
 434 
 435   // Start the process of compaction of the current space: compute
 436   // post-compaction addresses, and insert forwarding pointers.  The fields
 437   // "cp->gen" and "cp->compaction_space" are the generation and space into
 438   // which we are currently compacting.  This call updates "cp" as necessary,
 439   // and leaves the "compaction_top" of the final value of
 440   // "cp->compaction_space" up-to-date.  Offset tables may be updated in
 441   // this phase as if the final copy had occurred; if so, "cp->threshold"
 442   // indicates when the next such action should be taken.
 443   virtual void prepare_for_compaction(CompactPoint* cp);
 444   // MarkSweep support phase3
 445   virtual void adjust_pointers();
 446   // MarkSweep support phase4
 447   virtual void compact();
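
  // A minimal sketch of the phase ordering; the collector, not the space,
  // drives these calls, with "cp" a CompactPoint seeded with the target
  // generation and a NULL space:
  //
  //   space->prepare_for_compaction(&cp);  // phase 2: install forwarding pointers
  //   space->adjust_pointers();            // phase 3: retarget interior oops
  //   space->compact();                    // phase 4: slide live objects down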
 448 
 449   // The maximum percentage of objects that can be dead in the compacted
 450   // live part of a compacted space ("deadwood" support.)
 451   virtual size_t allowed_dead_ratio() const { return 0; }
 452 
 453   // Some contiguous spaces may maintain some data structures that should
 454   // be updated whenever an allocation crosses a boundary.  This function
 455   // returns the first such boundary.
 456   // (The default implementation returns the end of the space, so the
 457   // boundary is never crossed.)
 458   virtual HeapWord* initialize_threshold() { return end(); }
 459 
 460   // "q" is an object of the given "size" that should be forwarded;
 461   // "cp" names the generation ("gen") and containing "this" (which must
 462   // also equal "cp->space").  "compact_top" is where in "this" the
 463   // next object should be forwarded to.  If there is room in "this" for
 464   // the object, insert an appropriate forwarding pointer in "q".
 465   // If not, go to the next compaction space (there must
 466   // be one, since compaction must succeed -- we go to the first space of
 467   // the previous generation if necessary, updating "cp"), reset compact_top
 468   // and then forward.  In either case, returns the new value of "compact_top".
 469   // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
 470   // function of the then-current compaction space, and updates "cp->threshold"
 471   // accordingly.
 472   virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
 473                     HeapWord* compact_top);
 474 
 475   // Return a size with adjustments as required of the space.
 476   virtual size_t adjust_object_size_v(size_t size) const { return size; }
 477 
 478 protected:
 479   // Used during compaction.
 480   HeapWord* _first_dead;
 481   HeapWord* _end_of_live;
 482 
 483   // Minimum size of a free block.
 484   virtual size_t minimum_free_block_size() const = 0;
 485 
 486   // This function is invoked when an allocation of an object covering
 487   // "start" to "end" crosses the threshold; returns the next
 488   // threshold.  (The default implementation does nothing.)
 489   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
 490     return end();
 491   }
 492 
 493   // Requires "allowed_deadspace_words > 0", that "q" is the start of a
 494   // free block of the given "word_len", and that "q", were it an object,
 495   // would not move if forwarded.  If the size allows, fill the free
 496   // block with an object, to prevent excessive compaction.  Returns "true"
 497   // iff the free region was made deadspace, and modifies
 498   // "allowed_deadspace_words" to reflect the number of available deadspace
 499   // words remaining after this operation.
 500   bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
 501                         size_t word_len);
 502 };
 503 
 504 #define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
 505   /* Compute the new addresses for the live objects and store them in the  \
 506    * mark word.  Used by MarkSweep::mark_sweep_phase2()                     \
 507    */                                                                        \
 508   HeapWord* compact_top; /* This is where we are currently compacting to. */ \
 509                                                                              \
 510   /* We're sure to be here before any objects are compacted into this        \
 511    * space, so this is a good time to initialize this:                       \
 512    */                                                                        \
 513   set_compaction_top(bottom());                                              \
 514                                                                              \
 515   if (cp->space == NULL) {                                                   \
 516     assert(cp->gen != NULL, "need a generation");                            \
 517     assert(cp->threshold == NULL, "just checking");                          \
 518     assert(cp->gen->first_compaction_space() == this, "just checking");      \
 519     cp->space = cp->gen->first_compaction_space();                           \
 520     compact_top = cp->space->bottom();                                       \
 521     cp->space->set_compaction_top(compact_top);                              \
 522     cp->threshold = cp->space->initialize_threshold();                       \
 523   } else {                                                                   \
 524     compact_top = cp->space->compaction_top();                               \
 525   }                                                                          \
 526                                                                              \
 527   /* We allow some amount of garbage towards the bottom of the space, so     \
 528    * we don't start compacting before there is a significant gain to be made.\
 529    * Occasionally, we want to ensure a full compaction, which is determined  \
 530    * by the MarkSweepAlwaysCompactCount parameter.                           \
 531    */                                                                        \
 532   int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations;\
 533   bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);       \
 534                                                                              \
 535   size_t allowed_deadspace = 0;                                              \
 536   if (skip_dead) {                                                           \
 537     const size_t ratio = allowed_dead_ratio();                               \
 538     allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize;           \
 539   }                                                                          \
 540                                                                              \
 541   HeapWord* q = bottom();                                                    \
 542   HeapWord* t = scan_limit();                                                \
 543                                                                              \
 544   HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last \
 545                                    live object. */                           \
 546   HeapWord*  first_dead = end();/* The first dead object. */                 \
 547   LiveRange* liveRange  = NULL; /* The current live range, recorded in the   \
 548                                    first header of preceding free area. */   \
 549   _first_dead = first_dead;                                                  \
 550                                                                              \
 551   const intx interval = PrefetchScanIntervalInBytes;                         \
 552                                                                              \
 553   while (q < t) {                                                            \
 554     assert(!block_is_obj(q) ||                                               \
 555            oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||   \
 556            oop(q)->mark()->has_bias_pattern(),                               \
 557            "these are the only valid states during a mark sweep");           \
 558     if (block_is_obj(q) && oop(q)->is_gc_marked()) {                         \
 559       /* prefetch beyond q */                                                \
 560       Prefetch::write(q, interval);                                          \
 561       /* size_t size = oop(q)->size();  changing this for cms for perm gen */\
 562       size_t size = block_size(q);                                           \
 563       compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
 564       q += size;                                                             \
 565       end_of_live = q;                                                       \
 566     } else {                                                                 \
 567       /* run over all the contiguous dead objects */                         \
 568       HeapWord* end = q;                                                     \
 569       do {                                                                   \
 570         /* prefetch beyond end */                                            \
 571         Prefetch::write(end, interval);                                      \
 572         end += block_size(end);                                              \
 573       } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
 574                                                                              \
 575       /* see if we might want to pretend this object is alive so that        \
 576        * we don't have to compact quite as often.                            \
 577        */                                                                    \
 578       if (allowed_deadspace > 0 && q == compact_top) {                       \
 579         size_t sz = pointer_delta(end, q);                                   \
 580         if (insert_deadspace(allowed_deadspace, q, sz)) {                    \
 581           compact_top = cp->space->forward(oop(q), sz, cp, compact_top);     \
 582           q = end;                                                           \
 583           end_of_live = end;                                                 \
 584           continue;                                                          \
 585         }                                                                    \
 586       }                                                                      \
 587                                                                              \
 588       /* otherwise, it really is a free region. */                           \
 589                                                                              \
 590       /* for the previous LiveRange, record the end of the live objects. */  \
 591       if (liveRange) {                                                       \
 592         liveRange->set_end(q);                                               \
 593       }                                                                      \
 594                                                                              \
 595       /* record the current LiveRange object.                                \
 596        * liveRange->start() is overlaid on the mark word.                    \
 597        */                                                                    \
 598       liveRange = (LiveRange*)q;                                             \
 599       liveRange->set_start(end);                                             \
 600       liveRange->set_end(end);                                               \
 601                                                                              \
 602       /* see if this is the first dead region. */                            \
 603       if (q < first_dead) {                                                  \
 604         first_dead = q;                                                      \
 605       }                                                                      \
 606                                                                              \
 607       /* move on to the next object */                                       \
 608       q = end;                                                               \
 609     }                                                                        \
 610   }                                                                          \
 611                                                                              \
 612   assert(q == t, "just checking");                                           \
 613   if (liveRange != NULL) {                                                   \
 614     liveRange->set_end(q);                                                   \
 615   }                                                                          \
 616   _end_of_live = end_of_live;                                                \
 617   if (end_of_live < first_dead) {                                            \
 618     first_dead = end_of_live;                                                \
 619   }                                                                          \
 620   _first_dead = first_dead;                                                  \
 621                                                                              \
 622   /* save the compaction_top of the compaction space. */                     \
 623   cp->space->set_compaction_top(compact_top);                                \
 624 }
 625 
 626 #define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                             \
 627   /* adjust all the interior pointers to point at the new locations of objects  \
 628    * Used by MarkSweep::mark_sweep_phase3() */                                  \
 629                                                                                 \
 630   HeapWord* q = bottom();                                                       \
 631   HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */   \
 632                                                                                 \
 633   assert(_first_dead <= _end_of_live, "Stands to reason, no?");                 \
 634                                                                                 \
 635   if (q < t && _first_dead > q &&                                               \
 636       !oop(q)->is_gc_marked()) {                                                \
 637     /* we have a chunk of the space which hasn't moved and we've                \
 638      * reinitialized the mark word during the previous pass, so we can't        \
 639      * use is_gc_marked for the traversal. */                                   \
 640     HeapWord* end = _first_dead;                                                \
 641                                                                                 \
 642     while (q < end) {                                                           \
 643       /* I originally tried to conjoin "block_start(q) == q" to the             \
 644        * assertion below, but that doesn't work, because you can't              \
 645        * accurately traverse previous objects to get to the current one         \
 646        * after their pointers (including pointers into permGen) have been       \
 647        * updated, until the actual compaction is done.  dld, 4/00 */            \
 648       assert(block_is_obj(q),                                                   \
 649              "should be at block boundaries, and should be looking at objs");   \
 650                                                                                 \
 651       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));     \
 652                                                                                 \
 653       /* point all the oops to the new location */                              \
 654       size_t size = oop(q)->adjust_pointers();                                  \
 655       size = adjust_obj_size(size);                                             \
 656                                                                                 \
 657       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());           \
 658                                                                                 \
 659       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));     \
 660                                                                                 \
 661       q += size;                                                                \
 662     }                                                                           \
 663                                                                                 \
 664     if (_first_dead == t) {                                                     \
 665       q = t;                                                                    \
 666     } else {                                                                    \
 667       /* $$$ This is funky.  Using this to read the previously written          \
 668        * LiveRange.  See also use below. */                                     \
 669       q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();                \
 670     }                                                                           \
 671   }                                                                             \
 672                                                                                 \
 673   const intx interval = PrefetchScanIntervalInBytes;                            \
 674                                                                                 \
 675   debug_only(HeapWord* prev_q = NULL);                                          \
 676   while (q < t) {                                                               \
 677     /* prefetch beyond q */                                                     \
 678     Prefetch::write(q, interval);                                               \
 679     if (oop(q)->is_gc_marked()) {                                               \
 680       /* q is alive */                                                          \
 681       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));     \
 682       /* point all the oops to the new location */                              \
 683       size_t size = oop(q)->adjust_pointers();                                  \
 684       size = adjust_obj_size(size);                                             \
 685       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());           \
 686       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));     \
 687       debug_only(prev_q = q);                                                   \
 688       q += size;                                                                \
 689     } else {                                                                    \
 690       /* q is not a live object, so its mark should point at the next           \
 691        * live object */                                                         \
 692       debug_only(prev_q = q);                                                   \
 693       q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
 694       assert(q > prev_q, "we should be moving forward through memory");         \
 695     }                                                                           \
 696   }                                                                             \
 697                                                                                 \
 698   assert(q == t, "just checking");                                              \
 699 }
 700 
 701 #define SCAN_AND_COMPACT(obj_size) {                                            \
 702   /* Copy all live objects to their new location                                \
 703    * Used by MarkSweep::mark_sweep_phase4() */                                  \
 704                                                                                 \
 705   HeapWord*       q = bottom();                                                 \
 706   HeapWord* const t = _end_of_live;                                             \
 707   debug_only(HeapWord* prev_q = NULL);                                          \
 708                                                                                 \
 709   if (q < t && _first_dead > q &&                                               \
 710       !oop(q)->is_gc_marked()) {                                                \
 711     debug_only(                                                                 \
 712     /* we have a chunk of the space which hasn't moved and we've reinitialized  \
 713      * the mark word during the previous pass, so we can't use is_gc_marked for \
 714      * the traversal. */                                                        \
 715     HeapWord* const end = _first_dead;                                          \
 716                                                                                 \
 717     while (q < end) {                                                           \
 718       size_t size = obj_size(q);                                                \
 719       assert(!oop(q)->is_gc_marked(),                                           \
 720              "should be unmarked (special dense prefix handling)");             \
 721       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q));       \
 722       debug_only(prev_q = q);                                                   \
 723       q += size;                                                                \
 724     }                                                                           \
 725     )  /* debug_only */                                                         \
 726                                                                                 \
 727     if (_first_dead == t) {                                                     \
 728       q = t;                                                                    \
 729     } else {                                                                    \
 730       /* $$$ Funky */                                                           \
 731       q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();               \
 732     }                                                                           \
 733   }                                                                             \
 734                                                                                 \
 735   const intx scan_interval = PrefetchScanIntervalInBytes;                       \
 736   const intx copy_interval = PrefetchCopyIntervalInBytes;                       \
 737   while (q < t) {                                                               \
 738     if (!oop(q)->is_gc_marked()) {                                              \
 739       /* mark is pointer to next marked oop */                                  \
 740       debug_only(prev_q = q);                                                   \
 741       q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
 742       assert(q > prev_q, "we should be moving forward through memory");         \
 743     } else {                                                                    \
 744       /* prefetch beyond q */                                                   \
 745       Prefetch::read(q, scan_interval);                                         \
 746                                                                                 \
 747       /* size and destination */                                                \
 748       size_t size = obj_size(q);                                                \
 749       HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();                \
 750                                                                                 \
 751       /* prefetch beyond compaction_top */                                      \
 752       Prefetch::write(compaction_top, copy_interval);                           \
 753                                                                                 \
 754       /* copy object and reinit its mark */                                     \
 755       VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size,            \
 756                                                             compaction_top));   \
 757       assert(q != compaction_top, "everything in this pass should be moving");  \
 758       Copy::aligned_conjoint_words(q, compaction_top, size);                    \
 759       oop(compaction_top)->init_mark();                                         \
 760       assert(oop(compaction_top)->klass() != NULL, "should have a class");      \
 761                                                                                 \
 762       debug_only(prev_q = q);                                                   \
 763       q += size;                                                                \
 764     }                                                                           \
 765   }                                                                             \
 766                                                                                 \
 767   /* Let's remember if we were empty before we did the compaction. */           \
 768   bool was_empty = used_region().is_empty();                                    \
 769   /* Reset space after compaction is complete */                                \
 770   reset_after_compaction();                                                     \
 771   /* We do this clear, below, since it has overloaded meanings for some */      \
 772   /* space subtypes.  For example, OffsetTableContigSpace's that were   */      \
 773   /* compacted into will have had their offset table thresholds updated */      \
 774   /* continuously, but those that weren't need to have their thresholds */      \
 775   /* re-initialized.  Also mangles unused area for debugging.           */      \
 776   if (used_region().is_empty()) {                                               \
 777     if (!was_empty) clear(SpaceDecorator::Mangle);                              \
 778   } else {                                                                      \
 779     if (ZapUnusedHeapArea) mangle_unused_area();                                \
 780   }                                                                             \
 781 }
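
// A minimal sketch of how a compactible space typically instantiates the three
// macros above; the accessor names (scan_limit, block_is_obj, block_size,
// adjust_obj_size, obj_size) stand for whatever the concrete space provides:
//
//   void MySpace::prepare_for_compaction(CompactPoint* cp) {
//     SCAN_AND_FORWARD(cp, scan_limit, block_is_obj, block_size);
//   }
//   void MySpace::adjust_pointers() {
//     if (!is_empty()) SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
//   }
//   void MySpace::compact() {
//     SCAN_AND_COMPACT(obj_size);
//   }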
 782 
 783 class GenSpaceMangler;
 784 
 785 // A space in which the free area is contiguous.  It therefore supports
 786 // faster allocation and compaction.
 787 class ContiguousSpace: public CompactibleSpace {
 788   friend class OneContigSpaceCardGeneration;
 789   friend class VMStructs;
 790  protected:
 791   HeapWord* _top;
 792   HeapWord* _concurrent_iteration_safe_limit;
 793   // A helper for mangling the unused area of the space in debug builds.
 794   GenSpaceMangler* _mangler;
 795 
 796   GenSpaceMangler* mangler() { return _mangler; }
 797 
 798   // Allocation helpers (return NULL if full).
 799   inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
 800   inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
 801 
 802  public:
 803   ContiguousSpace();
 804   ~ContiguousSpace();
 805 
 806   virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 807   virtual void clear(bool mangle_space);
 808 
 809   // Accessors
 810   HeapWord* top() const            { return _top;    }
 811   void set_top(HeapWord* value)    { _top = value; }
 812 
 813   virtual void set_saved_mark()    { _saved_mark_word = top();    }
 814   void reset_saved_mark()          { _saved_mark_word = bottom(); }
 815 
 816   WaterMark bottom_mark()     { return WaterMark(this, bottom()); }
 817   WaterMark top_mark()        { return WaterMark(this, top()); }
 818   WaterMark saved_mark()      { return WaterMark(this, saved_mark_word()); }
 819   bool saved_mark_at_top() const { return saved_mark_word() == top(); }
 820 
 821   // In debug mode mangle (write it with a particular bit
 822   // pattern) the unused part of a space.
 823 
 824   // Used to save an address in a space for later use during mangling.
 825   void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
 826   // Used to save the space's current top for later use during mangling.
 827   void set_top_for_allocations() PRODUCT_RETURN;
 828 
 829   // Mangle regions in the space from the current top up to the
 830   // previously mangled part of the space.
 831   void mangle_unused_area() PRODUCT_RETURN;
 832   // Mangle [top, end)
 833   void mangle_unused_area_complete() PRODUCT_RETURN;
 834   // Mangle the given MemRegion.
 835   void mangle_region(MemRegion mr) PRODUCT_RETURN;
 836 
 837   // Do some sparse checking on the area that should have been mangled.
 838   void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
 839   // Check the complete area that should have been mangled.
 840   // This code may be NULL depending on the macro DEBUG_MANGLING.
 841   void check_mangled_unused_area_complete() PRODUCT_RETURN;
 842 
 843   // Size computations: sizes in bytes.
 844   size_t capacity() const        { return byte_size(bottom(), end()); }
 845   size_t used() const            { return byte_size(bottom(), top()); }
 846   size_t free() const            { return byte_size(top(),    end()); }
 847 
 848   // Override from space.
 849   bool is_in(const void* p) const;
 850 
 851   virtual bool is_free_block(const HeapWord* p) const;
 852 
 853   // In a contiguous space we have a more obvious bound on what parts
 854   // contain objects.
 855   MemRegion used_region() const { return MemRegion(bottom(), top()); }
 856 
 857   MemRegion used_region_at_save_marks() const {
 858     return MemRegion(bottom(), saved_mark_word());
 859   }
 860 
 861   // Allocation (return NULL if full)
 862   virtual HeapWord* allocate(size_t word_size);
 863   virtual HeapWord* par_allocate(size_t word_size);
 864 
 865   virtual bool obj_allocated_since_save_marks(const oop obj) const {
 866     return (HeapWord*)obj >= saved_mark_word();
 867   }
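
  // A minimal sketch of the save-marks protocol this predicate relies on;
  // with bump-pointer allocation, "allocated later" implies "higher address":
  //
  //   cs->set_saved_mark();                    // record top() as the mark
  //   HeapWord* p = cs->allocate(word_size);   // some later allocation
  //   // assuming the allocation succeeded:
  //   assert(cs->obj_allocated_since_save_marks(oop(p)), "allocated after mark");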
 868 
 869   // Iteration
 870   void oop_iterate(OopClosure* cl);
 871   void oop_iterate(MemRegion mr, OopClosure* cl);
 872   void object_iterate(ObjectClosure* blk);
 873   // For contiguous spaces this method will iterate safely over objects
 874   // in the space (i.e., between bottom and top) when at a safepoint.
 875   void safe_object_iterate(ObjectClosure* blk);
 876   void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
 877   // iterates on objects up to the safe limit
 878   HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
 879   inline HeapWord* concurrent_iteration_safe_limit();
 880   // Changes the safe limit; all objects from bottom() to the new
 881   // limit should be properly initialized.
 882   inline void set_concurrent_iteration_safe_limit(HeapWord* new_limit);
 883 
 884 #ifndef SERIALGC
 885   // In support of parallel oop_iterate.
 886   #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix)  \
 887     void par_oop_iterate(MemRegion mr, OopClosureType* blk);
 888 
 889     ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
 890   #undef ContigSpace_PAR_OOP_ITERATE_DECL
 891 #endif // SERIALGC
 892 
 893   // Compaction support
 894   virtual void reset_after_compaction() {
 895     assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
 896     set_top(compaction_top());
 897     // set new iteration safe limit
 898     set_concurrent_iteration_safe_limit(compaction_top());
 899   }
 900   virtual size_t minimum_free_block_size() const { return 0; }
 901 
 902   // Override.
 903   DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
 904                                      CardTableModRefBS::PrecisionStyle precision,
 905                                      HeapWord* boundary = NULL);
 906 
 907   // Apply "blk->do_oop" to the addresses of all reference fields in objects
 908   // starting with the _saved_mark_word, which was noted during a generation's
 909   // save_marks and is required to denote the head of an object.
 910   // Fields in objects allocated by applications of the closure
 911   // *are* included in the iteration.
 912   // Updates _saved_mark_word to point to just after the last object
 913   // iterated over.
 914 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
 915   void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
 916 
 917   ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
 918 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
 919 
 920   // Same as object_iterate, but starting from "mark", which is required
 921   // to denote the start of an object.  Objects allocated by
 922   // applications of the closure *are* included in the iteration.
 923   virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);
 924 
 925   // Very inefficient implementation.
 926   virtual HeapWord* block_start_const(const void* p) const;
 927   size_t block_size(const HeapWord* p) const;
 928   // If a block is in the allocated area, it is an object.
 929   bool block_is_obj(const HeapWord* p) const { return p < top(); }
 930 
 931   // Addresses for inlined allocation
 932   HeapWord** top_addr() { return &_top; }
 933   HeapWord** end_addr() { return &_end; }
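
  // These addresses exist so that the lock-free bump-the-pointer fast path can
  // be inlined (compiled code, and par_allocate_impl on the C++ side, use the
  // same scheme).  An illustrative sketch, ignoring the compare-and-swap retry:
  //
  //   HeapWord* obj = *top_addr();
  //   if (pointer_delta(*end_addr(), obj) >= word_size) {
  //     HeapWord* new_top = obj + word_size;
  //     // atomically install new_top at top_addr(); on failure, reload and retry
  //   }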
 934 
 935   // Overrides for more efficient compaction support.
 936   void prepare_for_compaction(CompactPoint* cp);
 937 
 938   // PrintHeapAtGC support.
 939   virtual void print_on(outputStream* st) const;
 940 
 941   // Checked dynamic downcasts.
 942   virtual ContiguousSpace* toContiguousSpace() {
 943     return this;
 944   }
 945 
 946   // Debugging
 947   virtual void verify(bool allow_dirty) const;
 948 
 949   // Used to increase collection frequency.  "factor" of 0 means entire
 950   // space.
 951   void allocate_temporary_filler(int factor);
 952 
 953 };
 954 
 955 
 956 // A dirty card to oop closure that does filtering.
 957 // It knows how to filter out objects that are outside of the _boundary.
 958 class Filtering_DCTOC : public DirtyCardToOopClosure {
 959 protected:
 960   // Override.
 961   void walk_mem_region(MemRegion mr,
 962                        HeapWord* bottom, HeapWord* top);
 963 
 964   // Walk the given memory region, from bottom to top, applying
 965   // the given oop closure to (possibly) all objects found. The
 966   // given oop closure may or may not be the same as the oop
 967   // closure with which this closure was created, as it may
 968   // be a filtering closure which makes use of the _boundary.
 969   // We offer two signatures, so the FilteringClosure static type is
 970   // apparent.
 971   virtual void walk_mem_region_with_cl(MemRegion mr,
 972                                        HeapWord* bottom, HeapWord* top,
 973                                        OopClosure* cl) = 0;
 974   virtual void walk_mem_region_with_cl(MemRegion mr,
 975                                        HeapWord* bottom, HeapWord* top,
 976                                        FilteringClosure* cl) = 0;
 977 
 978 public:
 979   Filtering_DCTOC(Space* sp, OopClosure* cl,
 980                   CardTableModRefBS::PrecisionStyle precision,
 981                   HeapWord* boundary) :
 982     DirtyCardToOopClosure(sp, cl, precision, boundary) {}
 983 };
 984 
 985 // A dirty card to oop closure for contiguous spaces
 986 // (ContiguousSpace and sub-classes).
 987 // It is a Filtering_DCTOC, as defined above, and it knows:
 988 //
 989 // 1. That the actual top of any area in a memory region
 990 //    contained by the space is bounded by the end of the contiguous
 991 //    region of the space.
 992 // 2. That the space is really made up of objects and not just
 993 //    blocks.
 994 
 995 class ContiguousSpaceDCTOC : public Filtering_DCTOC {
 996 protected:
 997   // Overrides.
 998   HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
 999 
1000   virtual void walk_mem_region_with_cl(MemRegion mr,
1001                                        HeapWord* bottom, HeapWord* top,
1002                                        OopClosure* cl);
1003   virtual void walk_mem_region_with_cl(MemRegion mr,
1004                                        HeapWord* bottom, HeapWord* top,
1005                                        FilteringClosure* cl);
1006 
1007 public:
1008   ContiguousSpaceDCTOC(ContiguousSpace* sp, OopClosure* cl,
1009                        CardTableModRefBS::PrecisionStyle precision,
1010                        HeapWord* boundary) :
1011     Filtering_DCTOC(sp, cl, precision, boundary)
1012   {}
1013 };
1014 
1015 
1016 // Class EdenSpace describes the eden space in the new generation.
1017 
1018 class DefNewGeneration;
1019 
1020 class EdenSpace : public ContiguousSpace {
1021   friend class VMStructs;
1022  private:
1023   DefNewGeneration* _gen;
1024 
1025   // _soft_end is used as a soft limit on allocation.  As soft limits are
1026   // reached, the slow-path allocation code can invoke other actions and then
1027   // adjust _soft_end up to a new soft limit or to end().
1028   HeapWord* _soft_end;
1029 
1030  public:
1031   EdenSpace(DefNewGeneration* gen) :
1032    _gen(gen), _soft_end(NULL) {}
1033 
1034   // Get/set just the 'soft' limit.
1035   HeapWord* soft_end()               { return _soft_end; }
1036   HeapWord** soft_end_addr()         { return &_soft_end; }
1037   void set_soft_end(HeapWord* value) { _soft_end = value; }
1038 
1039   // Override.
1040   void clear(bool mangle_space);
1041 
1042   // Set both the 'hard' and 'soft' limits (_end and _soft_end).
1043   void set_end(HeapWord* value) {
1044     set_soft_end(value);
1045     ContiguousSpace::set_end(value);
1046   }
1047 
1048   // Allocation (return NULL if full)
1049   HeapWord* allocate(size_t word_size);
1050   HeapWord* par_allocate(size_t word_size);
1051 };
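
// An illustrative sketch of the soft-limit protocol described above (the real
// policy decisions belong to the owning DefNewGeneration; the retry below is
// only a sketch):
//
//   HeapWord* result = eden->par_allocate(word_size);   // stops at _soft_end
//   if (result == NULL && eden->soft_end() != eden->end()) {
//     // take whatever periodic action the soft limit exists for, then widen
//     // the limit (up to the hard end) and retry the allocation
//     eden->set_soft_end(eden->end());
//     result = eden->par_allocate(word_size);
//   }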
1052 
1053 // Class ConcEdenSpace extends EdenSpace for the sake of safe
1054 // allocation while the soft end is being modified concurrently.
1055 
1056 class ConcEdenSpace : public EdenSpace {
1057  public:
1058   ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }
1059 
1060   // Allocation (return NULL if full)
1061   HeapWord* par_allocate(size_t word_size);
1062 };
1063 
1064 
1065 // A ContiguousSpace that supports an efficient "block_start" operation via
1066 // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
1067 // other spaces).  This is the abstract base class for old generation
1068 // (tenured, perm) spaces.
1069 
1070 class OffsetTableContigSpace: public ContiguousSpace {
1071   friend class VMStructs;
1072  protected:
1073   BlockOffsetArrayContigSpace _offsets;
1074   Mutex _par_alloc_lock;
1075 
1076  public:
1077   // Constructor
1078   OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
1079                          MemRegion mr);
1080 
1081   void set_bottom(HeapWord* value);
1082   void set_end(HeapWord* value);
1083 
1084   void clear(bool mangle_space);
1085 
1086   inline HeapWord* block_start_const(const void* p) const;
1087 
1088   // Add offset table update.
1089   virtual inline HeapWord* allocate(size_t word_size);
1090   inline HeapWord* par_allocate(size_t word_size);
1091 
1092   // MarkSweep support phase3
1093   virtual HeapWord* initialize_threshold();
1094   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
1095 
1096   virtual void print_on(outputStream* st) const;
1097 
1098   // Debugging
1099   void verify(bool allow_dirty) const;
1100 
1101   // Shared space support
1102   void serialize_block_offset_array_offsets(SerializeOopClosure* soc);
1103 };
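
// What the offset table buys: block_start_const() can locate the block
// covering an interior address in (near) constant time, instead of the linear
// walk from bottom() that plain ContiguousSpace performs.  For illustration,
// assuming "old_space" and "interior_addr" come from the caller:
//
//   HeapWord* start = old_space->block_start_const(interior_addr);
//   // Used heavily during card scanning, where dirty-card boundaries rarely
//   // coincide with object starts.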
1104 
1105 
1106 // Class TenuredSpace is used by TenuredGeneration
1107 
1108 class TenuredSpace: public OffsetTableContigSpace {
1109   friend class VMStructs;
1110  protected:
1111   // Mark sweep support
1112   size_t allowed_dead_ratio() const;
1113  public:
1114   // Constructor
1115   TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
1116                MemRegion mr) :
1117     OffsetTableContigSpace(sharedOffsetArray, mr) {}
1118 };
1119 
1120 
1121 // Class ContigPermSpace is used by CompactingPermGen
1122 
1123 class ContigPermSpace: public OffsetTableContigSpace {
1124   friend class VMStructs;
1125  protected:
1126   // Mark sweep support
1127   size_t allowed_dead_ratio() const;
1128  public:
1129   // Constructor
1130   ContigPermSpace(BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) :
1131     OffsetTableContigSpace(sharedOffsetArray, mr) {}
1132 };
1133 
1134 #endif // SHARE_VM_MEMORY_SPACE_HPP