/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

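// Given a tentative value of "top" for a dirty region and "top_obj", the
// start of the block containing the region's last word, return the address
// at which scanning should actually stop, extending past the region's end
// when an object spanning its right boundary must be scanned in full.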
HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop starts on the dirty card; since we do exact
          // store checks for objArrays, we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card.  Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done.  That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()).  To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right end back so that we do not rescan what
// we (or another worker thread) have already scanned or
// may be planning to scan.
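// For example (illustrative only): if an object starting on the last
// dirty card is not an array and extends past mr.end(), "top" is
// extended right to that object's end; but if a previous invocation has
// already processed everything at or above _min_done, "top" is trimmed
// back down to _min_done.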
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}

DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop starts on the dirty card; since we do exact
        // store checks for objArrays, we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card.  Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
          "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void Filtering_DCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
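// (Presumably this lets the compiler bind the oop_iterate calls in the
// macro below to the specialized variants generated for the statically
// known closure type, avoiding virtual dispatch through ExtendedOopClosure.)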
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
#endif  // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

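// Compute the post-compaction destination for live object q of "size"
// words: bump compact_top within the current compaction space, switching
// to the next compaction space (and wrapping around to the young
// generation's first space) whenever q does not fit, record the
// destination as a forwarding pointer in q's mark word, and keep the
// block offset table threshold up to date.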
HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->young_gen();
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // Store the forwarding pointer into the mark word.
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // If the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // We need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold) {
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  }
  return compact_top;
}

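// During compaction it can be cheaper to leave a dead range in place than
// to move everything behind it. If the remaining dead-space allowance
// covers "deadlength" words, overwrite the dead range at q with a filler
// object, mark it so compaction treats it as live (and thus leaves it in
// place), and return true; otherwise zero the allowance so no further
// dead space is accepted in this space.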
bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    CollectedHeap::fill_with_object(q, deadlength);
    oop(q)->set_mark(oop(q)->mark()->set_marked());
    assert((int) deadlength == oop(q)->size(), "bad filler object size");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}

void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

void CompactibleSpace::adjust_pointers() {
  // First check whether there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  scan_and_adjust_pointers(this);
}

void CompactibleSpace::compact() {
  scan_and_compact(this);
}

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
              (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(bottom()), p2i(end()));
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(bottom()), p2i(top()), p2i(end()));
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
              p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
}

void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(ExtendedOopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

#if INCLUDE_ALL_GCS
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                            \
  void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
    HeapWord* obj_addr = mr.start();                                        \
    HeapWord* t = mr.end();                                                 \
    while (obj_addr < t) {                                                  \
      assert(oop(obj_addr)->is_oop(), "Should be an oop");                  \
      obj_addr += oop(obj_addr)->oop_iterate(blk);                          \
    }                                                                       \
  }

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // INCLUDE_ALL_GCS

void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(blk);
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}

// For a ContiguousSpace, object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}

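// Iterate over objects from bottom() up to the concurrent-iteration safe
// limit with a closure that may refuse an object: a return value of 0 from
// do_object_careful() aborts the iteration and the failing address is
// returned; NULL means the entire range was processed.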
HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord * limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}

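// Apply the closure to every object allocated after the last call to
// save_marks(). The outer do-while re-reads top() because applying the
// closure may itself allocate in this space (e.g. when objects are
// promoted here), moving top upward while the inner loop runs.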
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)  \
                                                                          \
void ContiguousSpace::                                                    \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {            \
  HeapWord* t;                                                            \
  HeapWord* p = saved_mark_word();                                        \
  assert(p != NULL, "expected saved mark");                               \
                                                                          \
  const intx interval = PrefetchScanIntervalInBytes;                      \
  do {                                                                    \
    t = top();                                                            \
    while (p < t) {                                                       \
      Prefetch::write(p, interval);                                       \
      debug_only(HeapWord* prev = p);                                     \
      oop m = oop(p);                                                     \
      p += m->oop_iterate(blk);                                           \
    }                                                                     \
  } while (t < top());                                                    \
                                                                          \
  set_saved_mark_word(p);                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                  p2i(p), p2i(bottom()), p2i(end())));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(),
           err_msg(PTR_FORMAT " should be an object start", p2i(last)));
    return last;
  }
}

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                  p2i(p), p2i(bottom()), p2i(end())));
  HeapWord* current_top = top();
  assert(p <= current_top,
         err_msg("p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
                  p2i(p), p2i(current_top)));
  assert(p == current_top || oop(p)->is_oop(),
         err_msg("p (" PTR_FORMAT ") is not a block start - "
                 "current_top: " PTR_FORMAT ", is_oop: %s",
                 p2i(p), p2i(current_top), BOOL_TO_STR(oop(p)->is_oop())));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
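// Racing threads each read top, compute their new top, and attempt to
// install it with a compare-and-swap; a thread that loses the race simply
// retries against the freshly updated top until it either wins or the
// space runs out.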
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // "result" is one of two things:
      //   the old top value, meaning the exchange succeeded, or
      //   the current (different) value of top, meaning another thread
      //   got there first and we must retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
  assert(Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), "not locked");
  HeapWord* end_value = end();

  HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end_value, SurvivorAlignmentInBytes);
  if (obj == NULL) {
    return NULL;
  }

  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
      "checking alignment");
    return obj;
  } else {
    set_top(obj);
    return NULL;
  }
}

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size);
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size);
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary type array, leaving roughly a fraction
  // 1/'factor' of the current free space unallocated.
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // If the space is already full, there is nothing to do.
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= (size_t)align_object_size(array_header_size)) {
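    // Each HeapWord holds HeapWordSize / sizeof(jint) array elements
    // (two per word on 64-bit platforms), so the element count is the
    // payload size in words scaled by that factor.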
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}

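// The block offset table keeps a "threshold": the lowest address whose
// allocation requires a table update. cross_threshold() records the block
// [start, end) once allocation crosses it and returns the updated
// threshold.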
HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, look the object up via
    // the block offset table and check that the lookup finds its start.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}


size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}