/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card.  Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done.  That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}
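
// Illustrative sketch (comments only, not product code) of the extension
// performed above. Assuming (say) 512-byte cards, a plain object that
// starts on a dirty card can span past the end of that card even though
// the store that dirtied the card landed somewhere in its interior, so
// scanning must be extended to the end of the object:
//
//   cards:   | dirty    | clean    | clean    |
//   object:  [header ........ stored field ........)
//             ^ top_obj                             ^ top_obj + size
//
// Arrays are exempt because stores into them dirty the card of the
// element actually written, not just the card of the header.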

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()). To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate<false>(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right end back some so we do not scan what
// we (or another worker thread) may already have scanned
// or are planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}
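
// Worked example (hypothetical addresses, for illustration only) of the
// _min_done trimming in do_MemRegion() above. Dirty regions are handed to
// this closure in decreasing address order, and _min_done records the
// lowest address already processed. If a previous call covered
// [0x2000, 0x2800), then _min_done == 0x2000; if the next dirty region
// [0x1c00, 0x2000) is extended rightward to 0x2400 because an object
// straddles the boundary, the trim
//
//   if (_min_done != NULL && _min_done < top) top = _min_done;
//
// clips the walk back to [0x1c00, 0x2000) so nothing is scanned twice.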

DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card.  Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
          "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void Filtering_DCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType, nv) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate<nv>(cl, mr);                       \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate<nv>(cl);                                 \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate<nv>(cl, mr);                               \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure, false)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure, true)

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
void ContiguousSpace::mangle_region(MemRegion mr) {
  // Although this method uses SpaceMangler::mangle_region(), which
  // is not specific to a space, when the ContiguousSpace version
  // is called it is always with regard to a space, so this
  // bounds checking is appropriate.
  MemRegion space_mr(bottom(), end());
  assert(space_mr.contains(mr), "Mangling outside space");
  SpaceMangler::mangle_region(mr);
}
#endif  // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // We need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}
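
// Illustrative sketch (not product code) of how forward() is driven by the
// forwarding pass: live objects are visited in address order and assigned
// destination addresses, with compact_top acting as a bump pointer into
// the destination space, e.g.
//
//   HeapWord* compact_top = cp->space->compaction_top();
//   // for each live object q of word size sz, in address order:
//   compact_top = cp->space->forward(oop(q), sz, cp, compact_top);
//   // ...
//   cp->space->set_compaction_top(compact_top);
//
// The forwarding pointers stored in the mark words here are what the
// later adjust_pointers() and compact() passes consult.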

bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    CollectedHeap::fill_with_object(q, deadlength);
    oop(q)->set_mark(oop(q)->mark()->set_marked());
    assert((int) deadlength == oop(q)->size(), "bad filler object size");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}

#define block_is_always_obj(q) true
#define obj_size(q) oop(q)->size()
#define adjust_obj_size(s) s

void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
}

// Faster object search.
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
}

void Space::adjust_pointers() {
  // Adjust all the interior pointers to point at the new locations of objects.
  // Used by MarkSweep::mark_sweep_phase3().

  // First check to see if there is any work to be done.
  if (used() == 0) {
    return;  // Nothing to do.
  }

  // Otherwise...
  HeapWord* q = bottom();
  HeapWord* t = end();

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    if (oop(q)->is_gc_marked()) {
      // q is alive

      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();

      debug_only(prev_q = q);

      q += size;
    } else {
      // q is not a live object, but since this is not a compactible
      // space there are no live ranges to consult.
      debug_only(prev_q = q);
      q += block_size(q);
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }
  assert(q == t, "just checking");
}

void CompactibleSpace::adjust_pointers() {
  // First check if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
}

void CompactibleSpace::compact() {
  SCAN_AND_COMPACT(obj_size);
}

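// Illustrative summary (not product code) of how the passes above fit
// together in a full mark-compact collection, once marking is complete:
//
//   space->prepare_for_compaction(&cp);  // compute forwarding pointers
//   space->adjust_pointers();            // rewrite interior oops
//   space->compact();                    // slide live objects down
//
// The SCAN_AND_* macros (see space.hpp) expand into these passes using the
// block_is_obj/block_size or block_is_always_obj/obj_size bindings chosen
// above for each kind of space.
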
void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
              (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), end());
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), end());
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
              bottom(), top(), _offsets.threshold(), end());
}

void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(ExtendedOopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate<false>(blk);
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}

// For a ContiguousSpace object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}

template <bool nv, typename OopClosureType>
void ContiguousSpace::
cspace_oop_since_save_marks_iterate(OopClosureType* blk) {
  HeapWord* t;
  HeapWord* p = saved_mark_word();
  assert(p != NULL, "expected saved mark");

  const intx interval = PrefetchScanIntervalInBytes;
  do {
    t = top();
    while (p < t) {
      Prefetch::write(p, interval);
      debug_only(HeapWord* prev = p);
      oop m = oop(p);
      p += m->oop_iterate<nv>(blk);
    }
  } while (t < top());

  set_saved_mark_word(p);
}
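
// Illustrative sketch (not product code) of the saved-mark protocol served
// by the template above: a caller records the current top, promotes new
// objects past it, and then scans just the newly-added window. Because the
// closure may itself promote objects (raising top()), the do/while loop
// above re-reads top() until no new objects appear. Hypothetical usage:
//
//   space->set_saved_mark();                  // remember current top
//   // ... promotions advance top() ...
//   // scan [saved_mark_word(), top()), possibly several times:
//   space->cspace_oop_since_save_marks_iterate<false>(&cl);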

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                  p, bottom(), end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(),
           err_msg(PTR_FORMAT " should be an object start", last));
    return last;
  }
}

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                  p, bottom(), end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         err_msg("p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
                  p, current_top));
  assert(p == current_top || oop(p)->is_oop(),
         err_msg("p (" PTR_FORMAT ") is not a block start - "
                 "current_top: " PTR_FORMAT ", is_oop: %s",
                 p, current_top, BOOL_TO_STR(oop(p)->is_oop())));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                HeapWord* const end_value) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
                                                    HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //  the old top value: the exchange succeeded
      //  otherwise: the current top, installed by a competing thread,
      //  in which case the exchange failed and we retry
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
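
// Hypothetical caller sketch (not product code) for the lock-free path
// above: multiple threads race on the same space, and each retries its
// cmpxchg until it either installs its own new_top or sees the space
// exhausted.
//
//   HeapWord* p = space->par_allocate(word_size);
//   if (p == NULL) {
//     // slow path: take Heap_lock, expand the space, or collect
//   }
//
// A failed cmpxchg simply means another thread advanced top first; the
// loop recomputes from the fresh top value rather than failing.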

HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
  assert(Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), "not locked");
  HeapWord* end_value = end();

  HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end_value, SurvivorAlignmentInBytes);
  if (obj == NULL) {
    return NULL;
  }

  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
      "checking alignment");
    return obj;
  } else {
    set_top(obj);
    return NULL;
  }
}

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size, end());
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, end());
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary filler array, consuming the free space so that
  // roughly 1/factor of it remains free (all of it is consumed when
  // factor == 0).
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= (size_t)align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}
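
// Worked example (numbers for illustration only) of the sizing above:
// with a free gap of 1024 words and factor == 4, the filler takes
// 1024 - 1024/4 = 768 words. On a 64-bit VM (HeapWordSize == 8,
// sizeof(jint) == 4) and assuming, say, a 4-word int-array header, the
// array is given (768 - 4) * 2 = 1528 int elements, leaving a quarter of
// the original gap free for later allocation.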

void EdenSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  set_soft_end(end());
}

// Requires locking.
HeapWord* EdenSpace::allocate(size_t size) {
  return allocate_impl(size, soft_end());
}

// Lock-free.
HeapWord* EdenSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, soft_end());
}

HeapWord* ConcEdenSpace::par_allocate(size_t size)
{
  do {
    // The invariant is that top() must be read before the soft end: top()
    // can never be greater than end(), but if _soft_end were updated
    // between a (stale) read of the soft end and the read of top(), top()
    // could have grown past the stale soft-end value and the unsigned
    // pointer_delta() check below would wrongly succeed. To enforce this
    // load order, an OrderAccess::loadload() barrier is required after
    // the top() read.
    HeapWord* obj = top();
    OrderAccess::loadload();
    if (pointer_delta(*soft_end_addr(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //  the old top value: the exchange succeeded
      //  otherwise: the current top, installed by a competing thread,
      //  in which case the exchange failed and we retry
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
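
// Illustrative sketch (not product code) of the reordering the loadload()
// above prevents. If the soft end were read before top():
//
//   end_val = *soft_end_addr();   // reads the old, smaller _soft_end
//   // _soft_end grows; allocations move top() past end_val
//   obj = top();                  // now obj > end_val
//   // pointer_delta(end_val, obj) underflows (it is unsigned) and the
//   // size check wrongly succeeds
//
// Reading top() first, with the barrier keeping the two loads in order,
// keeps obj no greater than the soft-end value used in the check (given
// that _soft_end only grows between safepoints, as the comment above
// assumes).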

HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, check that the block
    // offset table finds the object's start correctly.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}

size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}