/*
 * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/pcTasks.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/valueArrayKlass.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

#include <math.h>

// All sizes are in HeapWords.
const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words
const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
const size_t ParallelCompactData::RegionSizeBytes =
  RegionSize << LogHeapWordSize;
const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
const size_t ParallelCompactData::RegionAddrMask       = ~RegionAddrOffsetMask;

const size_t ParallelCompactData::Log2BlockSize   = 7; // 128 words
const size_t ParallelCompactData::BlockSize       = (size_t)1 << Log2BlockSize;
const size_t ParallelCompactData::BlockSizeBytes  =
  BlockSize << LogHeapWordSize;
const size_t ParallelCompactData::BlockSizeOffsetMask = BlockSize - 1;
const size_t ParallelCompactData::BlockAddrOffsetMask = BlockSizeBytes - 1;
const size_t ParallelCompactData::BlockAddrMask       = ~BlockAddrOffsetMask;

const size_t ParallelCompactData::BlocksPerRegion = RegionSize / BlockSize;
const size_t ParallelCompactData::Log2BlocksPerRegion =
  Log2RegionSize - Log2BlockSize;
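//
// For illustration, on a 64-bit VM (LogHeapWordSize == 3, 8-byte HeapWords)
// these constants work out to:
//   RegionSize      = 2^16 = 65536 words, RegionSizeBytes = 512 KB
//   BlockSize       = 2^7  = 128 words,   BlockSizeBytes  = 1 KB
//   BlocksPerRegion = 2^16 / 2^7 = 512
// The *OffsetMask constants select the low bits of an address (the position
// within a region or block); the *AddrMask constants select the high bits
// (the aligned region or block base).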

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_shift = 27;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::los_mask = ~dc_mask;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
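
// A sketch of the encoding above:  RegionData packs the destination count
// (dc) and the live-obj size (los) into a single 32-bit region_sz_t.  Bits
// [31:27] hold dc and bits [26:0] hold los, so the values work out to
//   dc_mask  = 0xf8000000   dc_one     = 0x08000000
//   los_mask = 0x07ffffff   dc_claimed = 0x40000000   dc_completed = 0x60000000
// dc_claimed and dc_completed are sentinels distinct from any real
// destination count, so the same field can also track a region's progress
// through the compaction phase.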

SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];

ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;

double PSParallelCompact::_dwl_mean;
double PSParallelCompact::_dwl_std_dev;
double PSParallelCompact::_dwl_first_term;
double PSParallelCompact::_dwl_adjustment;
#ifdef  ASSERT
bool   PSParallelCompact::_dwl_initialized = false;
#endif  // #ifdef ASSERT

void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
                       HeapWord* destination)
{
  assert(src_region_idx != 0, "invalid src_region_idx");
  assert(partial_obj_size != 0, "invalid partial_obj_size argument");
  assert(destination != NULL, "invalid destination argument");

  _src_region_idx = src_region_idx;
  _partial_obj_size = partial_obj_size;
  _destination = destination;

  // These fields may not be updated below, so make sure they're clear.
  assert(_dest_region_addr == NULL, "should have been cleared");
  assert(_first_src_addr == NULL, "should have been cleared");

  // Determine the number of destination regions for the partial object.
  HeapWord* const last_word = destination + partial_obj_size - 1;
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  HeapWord* const beg_region_addr = sd.region_align_down(destination);
  HeapWord* const end_region_addr = sd.region_align_down(last_word);

  if (beg_region_addr == end_region_addr) {
    // One destination region.
    _destination_count = 1;
    if (end_region_addr == destination) {
      // The destination falls on a region boundary, thus the first word of the
      // partial object will be the first word copied to the destination region.
      _dest_region_addr = end_region_addr;
      _first_src_addr = sd.region_to_addr(src_region_idx);
    }
  } else {
    // Two destination regions.  When copied, the partial object will cross a
    // destination region boundary, so a word somewhere within the partial
    // object will be the first word copied to the second destination region.
    _destination_count = 2;
    _dest_region_addr = end_region_addr;
    const size_t ofs = pointer_delta(end_region_addr, destination);
    assert(ofs < _partial_obj_size, "sanity");
    _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
  }
}
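
// A worked example of the two-region case, assuming 64K-word (0x10000)
// regions:  a partial object of 0x5000 words whose destination begins 0xe000
// words into a destination region crosses into the following region.
// _destination_count is then 2, _dest_region_addr is the following region's
// base, and the word copied to that base lies ofs = 0x10000 - 0xe000 = 0x2000
// words into the partial object, so _first_src_addr is
// region_to_addr(src_region_idx) + 0x2000.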

void SplitInfo::clear()
{
  _src_region_idx = 0;
  _partial_obj_size = 0;
  _destination = NULL;
  _destination_count = 0;
  _dest_region_addr = NULL;
  _first_src_addr = NULL;
  assert(!is_valid(), "sanity");
}

#ifdef  ASSERT
void SplitInfo::verify_clear()
{
  assert(_src_region_idx == 0, "not clear");
  assert(_partial_obj_size == 0, "not clear");
  assert(_destination == NULL, "not clear");
  assert(_destination_count == 0, "not clear");
  assert(_dest_region_addr == NULL, "not clear");
  assert(_first_src_addr == NULL, "not clear");
}
#endif  // #ifdef ASSERT


void PSParallelCompact::print_on_error(outputStream* st) {
  _mark_bitmap.print_on_error(st);
}

#ifndef PRODUCT
const char* PSParallelCompact::space_names[] = {
  "old ", "eden", "from", "to  "
};

void PSParallelCompact::print_region_ranges() {
  if (!log_develop_is_enabled(Trace, gc, compaction, phases)) {
    return;
  }
  LogHandle(gc, compaction, phases) log;
  ResourceMark rm;
  Universe::print_on(log.trace_stream());
  log.trace("space  bottom     top        end        new_top");
  log.trace("------ ---------- ---------- ---------- ----------");

  for (unsigned int id = 0; id < last_space_id; ++id) {
    const MutableSpace* space = _space_info[id].space();
    log.trace("%u %s "
              SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
              SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
              id, space_names[id],
              summary_data().addr_to_region_idx(space->bottom()),
              summary_data().addr_to_region_idx(space->top()),
              summary_data().addr_to_region_idx(space->end()),
              summary_data().addr_to_region_idx(_space_info[id].new_top()));
  }
}

void
print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
{
#define REGION_IDX_FORMAT        SIZE_FORMAT_W(7)
#define REGION_DATA_FORMAT       SIZE_FORMAT_W(5)

  ParallelCompactData& sd = PSParallelCompact::summary_data();
  size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
  log_develop_trace(gc, compaction, phases)(
      REGION_IDX_FORMAT " " PTR_FORMAT " "
      REGION_IDX_FORMAT " " PTR_FORMAT " "
      REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
      REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
      i, p2i(c->data_location()), dci, p2i(c->destination()),
      c->partial_obj_size(), c->live_obj_size(),
      c->data_size(), c->source_region(), c->destination_count());

#undef  REGION_IDX_FORMAT
#undef  REGION_DATA_FORMAT
}

void
print_generic_summary_data(ParallelCompactData& summary_data,
                           HeapWord* const beg_addr,
                           HeapWord* const end_addr)
{
  size_t total_words = 0;
  size_t i = summary_data.addr_to_region_idx(beg_addr);
  const size_t last = summary_data.addr_to_region_idx(end_addr);
  HeapWord* pdest = 0;

  while (i <= last) {
    ParallelCompactData::RegionData* c = summary_data.region(i);
    if (c->data_size() != 0 || c->destination() != pdest) {
      print_generic_summary_region(i, c);
      total_words += c->data_size();
      pdest = c->destination();
    }
    ++i;
  }

  log_develop_trace(gc, compaction, phases)("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
}

void
print_generic_summary_data(ParallelCompactData& summary_data,
                           SpaceInfo* space_info)
{
  if (!log_develop_is_enabled(Trace, gc, compaction, phases)) {
    return;
  }

  for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
    const MutableSpace* space = space_info[id].space();
    print_generic_summary_data(summary_data, space->bottom(),
                               MAX2(space->top(), space_info[id].new_top()));
  }
}

void
print_initial_summary_data(ParallelCompactData& summary_data,
                           const MutableSpace* space) {
  if (space->top() == space->bottom()) {
    return;
  }

  const size_t region_size = ParallelCompactData::RegionSize;
  typedef ParallelCompactData::RegionData RegionData;
  HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
  const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
  const RegionData* c = summary_data.region(end_region - 1);
  HeapWord* end_addr = c->destination() + c->data_size();
  const size_t live_in_space = pointer_delta(end_addr, space->bottom());

  // Print (and count) the full regions at the beginning of the space.
  size_t full_region_count = 0;
  size_t i = summary_data.addr_to_region_idx(space->bottom());
  while (i < end_region && summary_data.region(i)->data_size() == region_size) {
    ParallelCompactData::RegionData* c = summary_data.region(i);
    log_develop_trace(gc, compaction, phases)(
        SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
        i, p2i(c->destination()),
        c->partial_obj_size(), c->live_obj_size(),
        c->data_size(), c->source_region(), c->destination_count());
    ++full_region_count;
    ++i;
  }

  size_t live_to_right = live_in_space - full_region_count * region_size;

  double max_reclaimed_ratio = 0.0;
  size_t max_reclaimed_ratio_region = 0;
  size_t max_dead_to_right = 0;
  size_t max_live_to_right = 0;

  // Print the 'reclaimed ratio' for regions while there is something live in
  // the region or to the right of it.  The remaining regions are empty (and
  // uninteresting), and computing the ratio will result in division by 0.
  while (i < end_region && live_to_right > 0) {
    c = summary_data.region(i);
    HeapWord* const region_addr = summary_data.region_to_addr(i);
    const size_t used_to_right = pointer_delta(space->top(), region_addr);
    const size_t dead_to_right = used_to_right - live_to_right;
    const double reclaimed_ratio = double(dead_to_right) / live_to_right;

    if (reclaimed_ratio > max_reclaimed_ratio) {
      max_reclaimed_ratio = reclaimed_ratio;
      max_reclaimed_ratio_region = i;
      max_dead_to_right = dead_to_right;
      max_live_to_right = live_to_right;
    }

    log_develop_trace(gc, compaction, phases)(
        SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d "
        "%12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
        i, p2i(c->destination()),
        c->partial_obj_size(), c->live_obj_size(),
        c->data_size(), c->source_region(), c->destination_count(),
        reclaimed_ratio, dead_to_right, live_to_right);


    live_to_right -= c->data_size();
    ++i;
  }

  // Any remaining regions are empty.  Print one more if there is one.
  if (i < end_region) {
    ParallelCompactData::RegionData* c = summary_data.region(i);
    log_develop_trace(gc, compaction, phases)(
        SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
         i, p2i(c->destination()),
         c->partial_obj_size(), c->live_obj_size(),
         c->data_size(), c->source_region(), c->destination_count());
  }

  log_develop_trace(gc, compaction, phases)("max:  " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
                                            max_reclaimed_ratio_region, max_dead_to_right, max_live_to_right, max_reclaimed_ratio);
}

void
print_initial_summary_data(ParallelCompactData& summary_data,
                           SpaceInfo* space_info) {
  if (!log_develop_is_enabled(Trace, gc, compaction, phases)) {
    return;
  }

  unsigned int id = PSParallelCompact::old_space_id;
  const MutableSpace* space;
  do {
    space = space_info[id].space();
    print_initial_summary_data(summary_data, space);
  } while (++id < PSParallelCompact::eden_space_id);

  do {
    space = space_info[id].space();
    print_generic_summary_data(summary_data, space->bottom(), space->top());
  } while (++id < PSParallelCompact::last_space_id);
}
#endif  // #ifndef PRODUCT

#ifdef  ASSERT
size_t add_obj_count;
size_t add_obj_size;
size_t mark_bitmap_count;
size_t mark_bitmap_size;
#endif  // #ifdef ASSERT

ParallelCompactData::ParallelCompactData()
{
  _region_start = 0;

  _region_vspace = 0;
  _reserved_byte_size = 0;
  _region_data = 0;
  _region_count = 0;

  _block_vspace = 0;
  _block_data = 0;
  _block_count = 0;
}

bool ParallelCompactData::initialize(MemRegion covered_region)
{
  _region_start = covered_region.start();
  const size_t region_size = covered_region.word_size();
  DEBUG_ONLY(_region_end = _region_start + region_size;)

  assert(region_align_down(_region_start) == _region_start,
         "region start not aligned");
  assert((region_size & RegionSizeOffsetMask) == 0,
         "region size not a multiple of RegionSize");

  bool result = initialize_region_data(region_size) && initialize_block_data();
  return result;
}

PSVirtualSpace*
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
  const size_t raw_bytes = count * element_size;
  const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));

  const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_sz, granularity);
  ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
  os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
                       rs.size());

  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

  PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
  if (vspace != 0) {
    if (vspace->expand_by(_reserved_byte_size)) {
      return vspace;
    }
    delete vspace;
    // Release memory reserved in the space.
    rs.release();
  }

  return 0;
}

bool ParallelCompactData::initialize_region_data(size_t region_size)
{
  const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
  _region_vspace = create_vspace(count, sizeof(RegionData));
  if (_region_vspace != 0) {
    _region_data = (RegionData*)_region_vspace->reserved_low_addr();
    _region_count = count;
    return true;
  }
  return false;
}

bool ParallelCompactData::initialize_block_data()
{
  assert(_region_count != 0, "region data must be initialized first");
  const size_t count = _region_count << Log2BlocksPerRegion;
  _block_vspace = create_vspace(count, sizeof(BlockData));
  if (_block_vspace != 0) {
    _block_data = (BlockData*)_block_vspace->reserved_low_addr();
    _block_count = count;
    return true;
  }
  return false;
}

void ParallelCompactData::clear()
{
  memset(_region_data, 0, _region_vspace->committed_size());
  memset(_block_data, 0, _block_vspace->committed_size());
}

void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
  assert(beg_region <= _region_count, "beg_region out of range");
  assert(end_region <= _region_count, "end_region out of range");
  assert(RegionSize % BlockSize == 0, "RegionSize not a multiple of BlockSize");

  const size_t region_cnt = end_region - beg_region;
  memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));

  const size_t beg_block = beg_region * BlocksPerRegion;
  const size_t block_cnt = region_cnt * BlocksPerRegion;
  memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
}

HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
{
  const RegionData* cur_cp = region(region_idx);
  const RegionData* const end_cp = region(region_count() - 1);

  HeapWord* result = region_to_addr(region_idx);
  if (cur_cp < end_cp) {
    do {
      result += cur_cp->partial_obj_size();
    } while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp);
  }
  return result;
}
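
// For intuition:  partial_obj_end() walks forward from region_idx, summing
// partial_obj_size() until it reaches a region whose partial object does not
// fill the entire region.  For example, if an object spills 0x1800 words into
// region N and nothing carries over further, the loop adds 0x1800 once and
// stops, returning region_to_addr(N) + 0x1800 -- the first word past the
// partial object.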

void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
{
  const size_t obj_ofs = pointer_delta(addr, _region_start);
  const size_t beg_region = obj_ofs >> Log2RegionSize;
  const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;

  DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
  DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)

  if (beg_region == end_region) {
    // All in one region.
    _region_data[beg_region].add_live_obj(len);
    return;
  }

  // First region.
  const size_t beg_ofs = region_offset(addr);
  _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);

  // Middle regions--completely spanned by this object.
  for (size_t region = beg_region + 1; region < end_region; ++region) {
    _region_data[region].set_partial_obj_size(RegionSize);
    _region_data[region].set_partial_obj_addr(addr);
  }

  // Last region.
  const size_t end_ofs = region_offset(addr + len - 1);
  _region_data[end_region].set_partial_obj_size(end_ofs + 1);
  _region_data[end_region].set_partial_obj_addr(addr);
}
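
// A worked add_obj() example, assuming 64K-word regions:  an object of
// len = 0x30000 words starting 0x8000 words into region N spans regions
// N through N+3.  Region N gains 0x8000 live words (RegionSize - beg_ofs),
// regions N+1 and N+2 are completely spanned (partial_obj_size = RegionSize),
// and region N+3 gets partial_obj_size = end_ofs + 1 = 0x8000.  Regions N+1
// through N+3 also record addr as their partial_obj_addr.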

void
ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
{
  assert(region_offset(beg) == 0, "not RegionSize aligned");
  assert(region_offset(end) == 0, "not RegionSize aligned");

  size_t cur_region = addr_to_region_idx(beg);
  const size_t end_region = addr_to_region_idx(end);
  HeapWord* addr = beg;
  while (cur_region < end_region) {
    _region_data[cur_region].set_destination(addr);
    _region_data[cur_region].set_destination_count(0);
    _region_data[cur_region].set_source_region(cur_region);
    _region_data[cur_region].set_data_location(addr);

    // Update live_obj_size so the region appears completely full.
    size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
    _region_data[cur_region].set_live_obj_size(live_size);

    ++cur_region;
    addr += RegionSize;
  }
}

// Find the point at which a space can be split and, if necessary, record the
// split point.
//
// If the current src region (which overflowed the destination space) doesn't
// have a partial object, the split point is at the beginning of the current src
// region (an "easy" split, no extra bookkeeping required).
//
// If the current src region has a partial object, the split point is in the
// region where that partial object starts (call it the split_region).  If
// split_region has a partial object, then the split point is just after that
// partial object (a "hard" split where we have to record the split data and
// zero the partial_obj_size field).  With a "hard" split, we know that the
// partial_obj ends within split_region because the partial object that caused
// the overflow starts in split_region.  If split_region doesn't have a partial
// obj, then the split is at the beginning of split_region (another "easy"
// split).
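//
// Illustration:  if the src region that overflowed begins with the tail of an
// object that starts in an earlier region R, splitting at the src region
// would cut that object in two, so the split point moves back to R (the
// split_region).  If R in turn begins with the tail of a still-earlier
// object, the split lands just after that tail (a "hard" split recorded in
// SplitInfo); otherwise it is at the start of R (an "easy" split).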
HeapWord*
ParallelCompactData::summarize_split_space(size_t src_region,
                                           SplitInfo& split_info,
                                           HeapWord* destination,
                                           HeapWord* target_end,
                                           HeapWord** target_next)
{
  assert(destination <= target_end, "sanity");
  assert(destination + _region_data[src_region].data_size() > target_end,
    "region should not fit into target space");
  assert(is_region_aligned(target_end), "sanity");

  size_t split_region = src_region;
  HeapWord* split_destination = destination;
  size_t partial_obj_size = _region_data[src_region].partial_obj_size();

  if (destination + partial_obj_size > target_end) {
    // The split point is just after the partial object (if any) in the
    // src_region that contains the start of the object that overflowed the
    // destination space.
    //
    // Find the start of the "overflow" object and set split_region to the
    // region containing it.
    HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
    split_region = addr_to_region_idx(overflow_obj);

    // Clear the source_region field of all destination regions whose first word
    // came from data after the split point (a non-null source_region field
    // implies a region must be filled).
    //
    // An alternative to the simple loop below:  clear during post_compact(),
    // which uses memcpy instead of individual stores, and is easy to
    // parallelize.  (The downside is that it clears the entire RegionData
    // object as opposed to just one field.)
    //
    // post_compact() would have to clear the summary data up to the highest
    // address that was written during the summary phase, which would be
    //
    //         max(top, max(new_top, clear_top))
    //
    // where clear_top is a new field in SpaceInfo.  Would have to set clear_top
    // to target_end.
    const RegionData* const sr = region(split_region);
    const size_t beg_idx =
      addr_to_region_idx(region_align_up(sr->destination() +
                                         sr->partial_obj_size()));
    const size_t end_idx = addr_to_region_idx(target_end);

    log_develop_trace(gc, compaction, phases)("split:  clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);
    for (size_t idx = beg_idx; idx < end_idx; ++idx) {
      _region_data[idx].set_source_region(0);
    }

    // Set split_destination and partial_obj_size to reflect the split region.
    split_destination = sr->destination();
    partial_obj_size = sr->partial_obj_size();
  }

  // The split is recorded only if a partial object extends onto the region.
  if (partial_obj_size != 0) {
    _region_data[split_region].set_partial_obj_size(0);
    split_info.record(split_region, partial_obj_size, split_destination);
  }

  // Setup the continuation addresses.
  *target_next = split_destination + partial_obj_size;
  HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;

  if (log_develop_is_enabled(Trace, gc, compaction, phases)) {
    const char * split_type = partial_obj_size == 0 ? "easy" : "hard";
    log_develop_trace(gc, compaction, phases)("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
                                              split_type, p2i(source_next), split_region, partial_obj_size);
    log_develop_trace(gc, compaction, phases)("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,
                                              split_type, p2i(split_destination),
                                              addr_to_region_idx(split_destination),
                                              p2i(*target_next));

    if (partial_obj_size != 0) {
      HeapWord* const po_beg = split_info.destination();
      HeapWord* const po_end = po_beg + split_info.partial_obj_size();
      log_develop_trace(gc, compaction, phases)("%s split:  po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,
                                                split_type,
                                                p2i(po_beg), addr_to_region_idx(po_beg),
                                                p2i(po_end), addr_to_region_idx(po_end));
    }
  }

  return source_next;
}

bool ParallelCompactData::summarize(SplitInfo& split_info,
                                    HeapWord* source_beg, HeapWord* source_end,
                                    HeapWord** source_next,
                                    HeapWord* target_beg, HeapWord* target_end,
                                    HeapWord** target_next)
{
  HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
  log_develop_trace(gc, compaction, phases)(
      "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT " "
      "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
      p2i(source_beg), p2i(source_end), p2i(source_next_val),
      p2i(target_beg), p2i(target_end), p2i(*target_next));

  size_t cur_region = addr_to_region_idx(source_beg);
  const size_t end_region = addr_to_region_idx(region_align_up(source_end));

  HeapWord *dest_addr = target_beg;
  while (cur_region < end_region) {
    // The destination must be set even if the region has no data.
    _region_data[cur_region].set_destination(dest_addr);

    size_t words = _region_data[cur_region].data_size();
    if (words > 0) {
      // If cur_region does not fit entirely into the target space, find a point
      // at which the source space can be 'split' so that part is copied to the
      // target space and the rest is copied elsewhere.
      if (dest_addr + words > target_end) {
        assert(source_next != NULL, "source_next is NULL when splitting");
        *source_next = summarize_split_space(cur_region, split_info, dest_addr,
                                             target_end, target_next);
        return false;
      }

      // Compute the destination_count for cur_region, and if necessary, update
      // source_region for a destination region.  The source_region field is
      // updated if cur_region is the first (left-most) region to be copied to a
      // destination region.
      //
      // The destination_count calculation is a bit subtle.  A region that has
      // data that compacts into itself does not count itself as a destination.
      // This maintains the invariant that a zero count means the region is
      // available and can be claimed and then filled.
      uint destination_count = 0;
      if (split_info.is_split(cur_region)) {
        // The current region has been split:  the partial object will be copied
        // to one destination space and the remaining data will be copied to
        // another destination space.  Adjust the initial destination_count and,
        // if necessary, set the source_region field if the partial object will
        // cross a destination region boundary.
        destination_count = split_info.destination_count();
        if (destination_count == 2) {
          size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
          _region_data[dest_idx].set_source_region(cur_region);
        }
      }

      HeapWord* const last_addr = dest_addr + words - 1;
      const size_t dest_region_1 = addr_to_region_idx(dest_addr);
      const size_t dest_region_2 = addr_to_region_idx(last_addr);

      // Initially assume that the destination regions will be the same and
      // adjust the value below if necessary.  Under this assumption, if
      // cur_region == dest_region_2, then cur_region will be compacted
      // completely into itself.
      destination_count += cur_region == dest_region_2 ? 0 : 1;
      if (dest_region_1 != dest_region_2) {
        // Destination regions differ; adjust destination_count.
        destination_count += 1;
        // Data from cur_region will be copied to the start of dest_region_2.
        _region_data[dest_region_2].set_source_region(cur_region);
      } else if (region_offset(dest_addr) == 0) {
        // Data from cur_region will be copied to the start of the destination
        // region.
        _region_data[dest_region_1].set_source_region(cur_region);
      }

      _region_data[cur_region].set_destination_count(destination_count);
      _region_data[cur_region].set_data_location(region_to_addr(cur_region));
      dest_addr += words;
    }

    ++cur_region;
  }

  *target_next = dest_addr;
  return true;
}
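
// The destination_count bookkeeping in small examples:  a region whose live
// data compacts entirely into itself gets count 0 (immediately claimable), a
// region whose data all moves into one other destination region gets count 1,
// and a region whose data straddles a destination region boundary typically
// gets count 2.  A split region starts from the count recorded in its
// SplitInfo, since its partial object and remaining data end up in different
// target spaces.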

HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) {
  assert(addr != NULL, "Should detect NULL oop earlier");
  assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
  assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");

  // Region covering the object.
  RegionData* const region_ptr = addr_to_region_ptr(addr);
  HeapWord* result = region_ptr->destination();

  // If the entire Region is live, the new location is region->destination + the
  // offset of the object within the Region.

  // Run some performance tests to determine if this special case pays off.  It
  // is worth it for pointers into the dense prefix.  If the optimization to
  // avoid pointer updates in regions that only point to the dense prefix is
  // ever implemented, this should be revisited.
  if (region_ptr->data_size() == RegionSize) {
    result += region_offset(addr);
    return result;
  }

  // Otherwise, the new location is region->destination + block offset + the
  // number of live words in the Block that are (a) to the left of addr and (b)
  // due to objects that start in the Block.

  // Fill in the block table if necessary.  This is unsynchronized, so multiple
  // threads may fill the block table for a region (harmless, since it is
  // idempotent).
  if (!region_ptr->blocks_filled()) {
    PSParallelCompact::fill_blocks(addr_to_region_idx(addr));
    region_ptr->set_blocks_filled();
  }

  HeapWord* const search_start = block_align_down(addr);
  const size_t block_offset = addr_to_block_ptr(addr)->offset();

  const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
  const size_t live = bitmap->live_words_in_range(cm, search_start, oop(addr));
  result += block_offset + live;
  DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result));
  return result;
}
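
// Sketch of the two paths above:  for an address 0x1234 words into a fully
// live region, the new location is simply destination + 0x1234.  Otherwise
// the block table supplies the count of live words ahead of addr's 128-word
// block within the region, and the mark bitmap supplies the live words
// between the block start and addr itself; their sum is addr's offset from
// the region's destination.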

#ifdef ASSERT
void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
{
  const size_t* const beg = (const size_t*)vspace->committed_low_addr();
  const size_t* const end = (const size_t*)vspace->committed_high_addr();
  for (const size_t* p = beg; p < end; ++p) {
    assert(*p == 0, "not zero");
  }
}

void ParallelCompactData::verify_clear()
{
  verify_clear(_region_vspace);
  verify_clear(_block_vspace);
}
#endif  // #ifdef ASSERT

STWGCTimer          PSParallelCompact::_gc_timer;
ParallelOldTracer   PSParallelCompact::_gc_tracer;
elapsedTimer        PSParallelCompact::_accumulated_time;
unsigned int        PSParallelCompact::_total_invocations = 0;
unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
jlong               PSParallelCompact::_time_of_last_gc = 0;
CollectorCounters*  PSParallelCompact::_counters = NULL;
ParMarkBitMap       PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;

bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }

void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
  PSParallelCompact::AdjustPointerClosure closure(_cm);
  klass->oops_do(&closure);
}

void PSParallelCompact::post_initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MemRegion mr = heap->reserved_region();
  _ref_processor =
    new ReferenceProcessor(mr,            // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           ParallelGCThreads, // mt processing degree
                           true,              // mt discovery
                           ParallelGCThreads, // mt discovery degree
                           true,              // atomic_discovery
                           &_is_alive_closure); // non-header is alive closure
  _counters = new CollectorCounters("PSParallelCompact", 1);

  // Initialize static fields in ParCompactionManager.
  ParCompactionManager::initialize(mark_bitmap());
}

bool PSParallelCompact::initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MemRegion mr = heap->reserved_region();

  // Was the old gen allocated successfully?
  if (!heap->old_gen()->is_allocated()) {
    return false;
  }

  initialize_space_info();
  initialize_dead_wood_limiter();

  if (!_mark_bitmap.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel "
      "garbage collection for the requested " SIZE_FORMAT "KB heap.",
      _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  if (!_summary_data.initialize(mr)) {
    vm_shutdown_during_initialization(
 873       err_msg("Unable to allocate " SIZE_FORMAT "KB card tables for parallel "
 874       "garbage collection for the requested " SIZE_FORMAT "KB heap.",
 875       _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
 876     return false;
 877   }
 878 
 879   return true;
 880 }
 881 
 882 void PSParallelCompact::initialize_space_info()
 883 {
 884   memset(&_space_info, 0, sizeof(_space_info));
 885 
 886   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 887   PSYoungGen* young_gen = heap->young_gen();
 888 
 889   _space_info[old_space_id].set_space(heap->old_gen()->object_space());
 890   _space_info[eden_space_id].set_space(young_gen->eden_space());
 891   _space_info[from_space_id].set_space(young_gen->from_space());
 892   _space_info[to_space_id].set_space(young_gen->to_space());
 893 
 894   _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
 895 }
 896 
 897 void PSParallelCompact::initialize_dead_wood_limiter()
 898 {
 899   const size_t max = 100;
 900   _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
 901   _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
 902   _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
 903   DEBUG_ONLY(_dwl_initialized = true;)
 904   _dwl_adjustment = normal_distribution(1.0);
 905 }
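
// In math terms, assuming normal_distribution(x) evaluates the Gaussian
// density set up above:
//   normal_distribution(x) =
//     _dwl_first_term * exp(-(x - _dwl_mean)^2 / (2 * _dwl_std_dev^2))
// with _dwl_first_term = 1 / (sqrt(2 * pi) * _dwl_std_dev).  _dwl_adjustment
// caches normal_distribution(1.0) so that dead_wood_limiter() (below) can
// shift the curve to return min_percent at density 1.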

void
PSParallelCompact::clear_data_covering_space(SpaceId id)
{
  // At this point, top is the value before GC, new_top() is the value that will
  // be set at the end of GC.  The marking bitmap is cleared to top; nothing
  // should be marked above top.  The summary data is cleared to the larger of
  // top & new_top.
  MutableSpace* const space = _space_info[id].space();
  HeapWord* const bot = space->bottom();
  HeapWord* const top = space->top();
  HeapWord* const max_top = MAX2(top, _space_info[id].new_top());

  const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
  const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
  _mark_bitmap.clear_range(beg_bit, end_bit);

  const size_t beg_region = _summary_data.addr_to_region_idx(bot);
  const size_t end_region =
    _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
  _summary_data.clear_range(beg_region, end_region);

  // Clear the data used to 'split' regions.
  SplitInfo& split_info = _space_info[id].split_info();
  if (split_info.is_valid()) {
    split_info.clear();
  }
  DEBUG_ONLY(split_info.verify_clear();)
}

void PSParallelCompact::pre_compact()
{
  // Update the from & to space pointers in space_info, since they are swapped
  // at each young gen gc.  Do the update unconditionally (even though a
  // promotion failure does not swap spaces) because an unknown number of young
  // collections will have swapped the spaces an unknown number of times.
  GCTraceTime(Trace, gc, phases) tm("Pre Compact", &_gc_timer);
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _space_info[from_space_id].set_space(heap->young_gen()->from_space());
  _space_info[to_space_id].set_space(heap->young_gen()->to_space());

  DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
  DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)

  // Increment the invocation count
  heap->increment_total_collections(true);

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    heap->old_gen()->verify_object_start_array();
  }

  DEBUG_ONLY(mark_bitmap()->verify_clear();)
  DEBUG_ONLY(summary_data().verify_clear();)

  // Have worker threads release resources the next time they run a task.
  gc_task_manager()->release_all_resources();

  ParCompactionManager::reset_all_bitmap_query_caches();
}

void PSParallelCompact::post_compact()
{
  GCTraceTime(Trace, gc, phases) tm("Post Compact", &_gc_timer);

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    // Clear the marking bitmap, summary data and split info.
    clear_data_covering_space(SpaceId(id));
    // Update top().  Must be done after clearing the bitmap and summary data.
    _space_info[id].publish_new_top();
  }

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
  MutableSpace* const from_space = _space_info[from_space_id].space();
  MutableSpace* const to_space   = _space_info[to_space_id].space();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  bool eden_empty = eden_space->is_empty();
  if (!eden_empty) {
    eden_empty = absorb_live_data_from_eden(heap->size_policy(),
                                            heap->young_gen(), heap->old_gen());
  }

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  bool young_gen_empty = eden_empty && from_space->is_empty() &&
    to_space->is_empty();

  ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
  MemRegion old_mr = heap->old_gen()->reserved();
  if (young_gen_empty) {
    modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
  } else {
    modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
  }

  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  ClassLoaderDataGraph::purge();
  MetaspaceAux::verify_metrics();

  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif

  ref_processor()->enqueue_discovered_references(NULL);

  if (ZapUnusedHeapArea) {
    heap->gen_mangle_unused_area();
  }

  // Update time of last GC
  reset_millis_since_last_gc();
}

HeapWord*
PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
                                                    bool maximum_compaction)
{
  const size_t region_size = ParallelCompactData::RegionSize;
  const ParallelCompactData& sd = summary_data();

  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const top_aligned_up = sd.region_align_up(space->top());
  const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
  const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);

  // Skip full regions at the beginning of the space--they are necessarily part
  // of the dense prefix.
  size_t full_count = 0;
  const RegionData* cp;
  for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
    ++full_count;
  }

  assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
  const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
  const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
  if (maximum_compaction || cp == end_cp || interval_ended) {
    _maximum_compaction_gc_num = total_invocations();
    return sd.region_to_addr(cp);
  }

  HeapWord* const new_top = _space_info[id].new_top();
  const size_t space_live = pointer_delta(new_top, space->bottom());
  const size_t space_used = space->used_in_words();
  const size_t space_capacity = space->capacity_in_words();

  const double cur_density = double(space_live) / space_capacity;
  const double deadwood_density =
    (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
  const size_t deadwood_goal = size_t(space_capacity * deadwood_density);

  if (TraceParallelOldGCDensePrefix) {
    tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
                  cur_density, deadwood_density, deadwood_goal);
    tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
                  "space_cap=" SIZE_FORMAT,
                  space_live, space_used,
                  space_capacity);
  }
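
  // As a worked example, the deadwood fraction (1-d)^2 * d^2 peaks at
  // d = 0.5, where it equals 0.0625:  a half-full space may leave up to
  // ~6% of capacity as dead wood in the dense prefix, while a nearly empty
  // or nearly full space (d near 0 or 1) allows almost none.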

  // XXX - Use binary search?
  HeapWord* dense_prefix = sd.region_to_addr(cp);
  const RegionData* full_cp = cp;
  while (cp < end_cp) {
    HeapWord* region_destination = cp->destination();
    const size_t cur_deadwood = pointer_delta(dense_prefix, region_destination);
    if (TraceParallelOldGCDensePrefix && Verbose) {
      tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
                    "dp=" PTR_FORMAT " " "cdw=" SIZE_FORMAT_W(8),
                    sd.region(cp), p2i(region_destination),
                    p2i(dense_prefix), cur_deadwood);
    }

    if (cur_deadwood >= deadwood_goal) {
      // Found the region that has the correct amount of deadwood to the left.
      // This typically occurs after crossing a fairly sparse set of regions, so
      // iterate backwards over those sparse regions, looking for the region
      // that has the lowest density of live objects 'to the right.'
      size_t space_to_left = sd.region(cp) * region_size;
      size_t live_to_left = space_to_left - cur_deadwood;
      size_t space_to_right = space_capacity - space_to_left;
      size_t live_to_right = space_live - live_to_left;
      double density_to_right = double(live_to_right) / space_to_right;
      while (cp > full_cp) {
        --cp;
        const size_t prev_region_live_to_right = live_to_right -
          cp->data_size();
        const size_t prev_region_space_to_right = space_to_right + region_size;
        double prev_region_density_to_right =
          double(prev_region_live_to_right) / prev_region_space_to_right;
        if (density_to_right <= prev_region_density_to_right) {
          return dense_prefix;
        }
        if (TraceParallelOldGCDensePrefix && Verbose) {
          tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
                        "pc_d2r=%10.8f", sd.region(cp), density_to_right,
                        prev_region_density_to_right);
        }
        dense_prefix -= region_size;
        live_to_right = prev_region_live_to_right;
        space_to_right = prev_region_space_to_right;
        density_to_right = prev_region_density_to_right;
      }
      return dense_prefix;
    }

    dense_prefix += region_size;
    ++cp;
  }

  return dense_prefix;
}

#ifndef PRODUCT
void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
                                                 const SpaceId id,
                                                 const bool maximum_compaction,
                                                 HeapWord* const addr)
{
  const size_t region_idx = summary_data().addr_to_region_idx(addr);
  RegionData* const cp = summary_data().region(region_idx);
  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const new_top = _space_info[id].new_top();

  const size_t space_live = pointer_delta(new_top, space->bottom());
  const size_t dead_to_left = pointer_delta(addr, cp->destination());
  const size_t space_cap = space->capacity_in_words();
  const double dead_to_left_pct = double(dead_to_left) / space_cap;
  const size_t live_to_right = new_top - cp->destination();
  const size_t dead_to_right = space->top() - addr - live_to_right;

  tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
                "spl=" SIZE_FORMAT " "
                "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
                "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
                " ratio=%10.8f",
                algorithm, p2i(addr), region_idx,
                space_live,
                dead_to_left, dead_to_left_pct,
                dead_to_right, live_to_right,
                double(dead_to_right) / live_to_right);
}
#endif  // #ifndef PRODUCT

// Return a fraction indicating how much of the generation can be treated as
// "dead wood" (i.e., not reclaimed).  The function uses a normal distribution
// based on the density of live objects in the generation to determine a limit,
// which is then adjusted so the return value is min_percent when the density is
// 1.
//
// The following table shows some return values for different values of the
// standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
// min_percent is 1.
//
//                          fraction allowed as dead wood
//         -----------------------------------------------------------------
// density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
// ------- ---------- ---------- ---------- ---------- ---------- ----------
// 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
// 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
// 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
// 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
// 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
// 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
// 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
// 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
// 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
// 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
// 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
// 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
// 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
// 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
// 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
// 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
// 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
// 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
// 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
// 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
// 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000

double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
{
  assert(_dwl_initialized, "uninitialized");

  // The raw limit is the value of the normal distribution at x = density.
  const double raw_limit = normal_distribution(density);

  // Adjust the raw limit so it becomes the minimum when the density is 1.
  //
  // First subtract the adjustment value (which is simply the precomputed value
  // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
  // Then add the minimum value, so the minimum is returned when the density is
  // 1.  Finally, prevent negative values, which occur when the mean is not 0.5.
  const double min = double(min_percent) / 100.0;
  const double limit = raw_limit - _dwl_adjustment + min;
  return MAX2(limit, 0.0);
}
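
// A quick numeric check of the table:  the flag values are divided by 100 in
// initialize_dead_wood_limiter(), so std_dev=80 means sigma = 0.8 with mean
// 0.5.  At density 0.5 the raw limit is the peak 1/(sqrt(2*pi)*0.8) ~ 0.4987,
// the adjustment normal_distribution(1.0) is ~0.4102, and with min_percent = 1
// the result is 0.4987 - 0.4102 + 0.01 ~ 0.0985, matching the table entry
// 0.09847664.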
1226 
ParallelCompactData::RegionData*
PSParallelCompact::first_dead_space_region(const RegionData* beg,
                                           const RegionData* end)
{
  const size_t region_size = ParallelCompactData::RegionSize;
  ParallelCompactData& sd = summary_data();
  size_t left = sd.region(beg);
  size_t right = end > beg ? sd.region(end) - 1 : left;

  // Binary search.
  while (left < right) {
    // Equivalent to (left + right) / 2, but does not overflow.
    const size_t middle = left + (right - left) / 2;
    RegionData* const middle_ptr = sd.region(middle);
    HeapWord* const dest = middle_ptr->destination();
    HeapWord* const addr = sd.region_to_addr(middle);
    assert(dest != NULL, "sanity");
    assert(dest <= addr, "must move left");

    if (middle > left && dest < addr) {
      right = middle - 1;
    } else if (middle < right && middle_ptr->data_size() == region_size) {
      left = middle + 1;
    } else {
      return middle_ptr;
    }
  }
  return sd.region(left);
}

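// Find, via binary search, the region in [beg, end) with approximately
// dead_words of dead space to its left, where the dead space to the left of
// a region is the distance its data moves, pointer_delta(addr, dest).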
ParallelCompactData::RegionData*
PSParallelCompact::dead_wood_limit_region(const RegionData* beg,
                                          const RegionData* end,
                                          size_t dead_words)
{
  ParallelCompactData& sd = summary_data();
  size_t left = sd.region(beg);
  size_t right = end > beg ? sd.region(end) - 1 : left;

  // Binary search.
  while (left < right) {
    // Equivalent to (left + right) / 2, but does not overflow.
    const size_t middle = left + (right - left) / 2;
    RegionData* const middle_ptr = sd.region(middle);
    HeapWord* const dest = middle_ptr->destination();
    HeapWord* const addr = sd.region_to_addr(middle);
    assert(dest != NULL, "sanity");
    assert(dest <= addr, "must move left");

    const size_t dead_to_left = pointer_delta(addr, dest);
    if (middle > left && dead_to_left > dead_words) {
      right = middle - 1;
    } else if (middle < right && dead_to_left < dead_words) {
      left = middle + 1;
    } else {
      return middle_ptr;
    }
  }
  return sd.region(left);
}

// The result is valid during the summary phase, after the initial summarization
// of each space into itself, and before final summarization.
inline double
PSParallelCompact::reclaimed_ratio(const RegionData* const cp,
                                   HeapWord* const bottom,
                                   HeapWord* const top,
                                   HeapWord* const new_top)
{
  ParallelCompactData& sd = summary_data();

  assert(cp != NULL, "sanity");
  assert(bottom != NULL, "sanity");
  assert(top != NULL, "sanity");
  assert(new_top != NULL, "sanity");
  assert(top >= new_top, "summary data problem?");
  assert(new_top > bottom, "space is empty; should not be here");
  assert(new_top >= cp->destination(), "sanity");
  assert(top >= sd.region_to_addr(cp), "sanity");

  HeapWord* const destination = cp->destination();
  const size_t dense_prefix_live  = pointer_delta(destination, bottom);
  const size_t compacted_region_live = pointer_delta(new_top, destination);
  const size_t compacted_region_used = pointer_delta(top,
                                                     sd.region_to_addr(cp));
  const size_t reclaimable = compacted_region_used - compacted_region_live;

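  // Live words in the compacted region are weighted by 1.25 in the divisor,
  // so a candidate that requires copying more live data yields a smaller
  // ratio and is less likely to be chosen.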
  const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
  return double(reclaimable) / divisor;
}

// Return the address of the end of the dense prefix, a.k.a. the start of the
// compacted region.  The address is always on a region boundary.
//
// Completely full regions at the left are skipped, since no compaction can
// occur in those regions.  Then the maximum amount of dead wood to allow is
// computed, based on the density (amount live / capacity) of the generation;
// the region with approximately that amount of dead space to the left is
// identified as the limit region.  Regions between the last completely full
// region and the limit region are scanned and the one that has the best
// (maximum) reclaimed_ratio() is selected.
HeapWord*
PSParallelCompact::compute_dense_prefix(const SpaceId id,
                                        bool maximum_compaction)
{
  const size_t region_size = ParallelCompactData::RegionSize;
  const ParallelCompactData& sd = summary_data();

  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const top = space->top();
  HeapWord* const top_aligned_up = sd.region_align_up(top);
  HeapWord* const new_top = _space_info[id].new_top();
  HeapWord* const new_top_aligned_up = sd.region_align_up(new_top);
  HeapWord* const bottom = space->bottom();
  const RegionData* const beg_cp = sd.addr_to_region_ptr(bottom);
  const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
  const RegionData* const new_top_cp =
    sd.addr_to_region_ptr(new_top_aligned_up);

  // Skip full regions at the beginning of the space--they are necessarily part
  // of the dense prefix.
  const RegionData* const full_cp = first_dead_space_region(beg_cp, new_top_cp);
  assert(full_cp->destination() == sd.region_to_addr(full_cp) ||
         space->is_empty(), "no dead space allowed to the left");
  assert(full_cp->data_size() < region_size || full_cp == new_top_cp - 1,
         "region must have dead space");

  // The gc number is saved whenever a maximum compaction is done, and used to
  // determine when the maximum compaction interval has expired.  This avoids
  // successive max compactions for different reasons.
  assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
  const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
  const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
    total_invocations() == HeapFirstMaximumCompactionCount;
  if (maximum_compaction || full_cp == top_cp || interval_ended) {
    _maximum_compaction_gc_num = total_invocations();
    return sd.region_to_addr(full_cp);
  }

  const size_t space_live = pointer_delta(new_top, bottom);
  const size_t space_used = space->used_in_words();
  const size_t space_capacity = space->capacity_in_words();

  const double density = double(space_live) / double(space_capacity);
  const size_t min_percent_free = MarkSweepDeadRatio;
  const double limiter = dead_wood_limiter(density, min_percent_free);
  const size_t dead_wood_max = space_used - space_live;
  const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
                                      dead_wood_max);

  if (TraceParallelOldGCDensePrefix) {
    tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
                  "space_cap=" SIZE_FORMAT,
                  space_live, space_used,
                  space_capacity);
    tty->print_cr("dead_wood_limiter(%6.4f, " SIZE_FORMAT ")=%6.4f "
                  "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
                  density, min_percent_free, limiter,
                  dead_wood_max, dead_wood_limit);
  }

  // Locate the region with the desired amount of dead space to the left.
  const RegionData* const limit_cp =
    dead_wood_limit_region(full_cp, top_cp, dead_wood_limit);

  // Scan from the first region with dead space to the limit region and find the
  // one with the best (largest) reclaimed ratio.
  double best_ratio = 0.0;
  const RegionData* best_cp = full_cp;
  for (const RegionData* cp = full_cp; cp < limit_cp; ++cp) {
    double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
    if (tmp_ratio > best_ratio) {
      best_cp = cp;
      best_ratio = tmp_ratio;
    }
  }

  return sd.region_to_addr(best_cp);
}

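// Quick, per-space summarization: each space is summarized into itself, so
// *new_top_addr ends up at bottom() plus the amount of live data in the
// space.  The dense prefix is reset to bottom() here and recomputed later by
// summarize_space().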
void PSParallelCompact::summarize_spaces_quick()
{
  for (unsigned int i = 0; i < last_space_id; ++i) {
    const MutableSpace* space = _space_info[i].space();
    HeapWord** nta = _space_info[i].new_top_addr();
    bool result = _summary_data.summarize(_space_info[i].split_info(),
                                          space->bottom(), space->top(), NULL,
                                          space->bottom(), space->end(), nta);
    assert(result, "space must fit into itself");
    _space_info[i].set_dense_prefix(space->bottom());
  }
}

void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
{
  HeapWord* const dense_prefix_end = dense_prefix(id);
  const RegionData* region = _summary_data.addr_to_region_ptr(dense_prefix_end);
  const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
  if (dead_space_crosses_boundary(region, dense_prefix_bit)) {
    // Only enough dead space is filled so that any remaining dead space to the
    // left is larger than the minimum filler object.  (The remainder is filled
    // during the copy/update phase.)
    //
    // The size of the dead space to the right of the boundary is not a
    // concern, since compaction will be able to use whatever space is
    // available.
    //
    // Here '||' is the boundary, 'x' represents a don't care bit and a box
    // surrounds the space to be filled with an object.
    //
    // In the 32-bit VM, each bit represents two 32-bit words:
    //                              +---+
    // a) beg_bits:  ...  x   x   x | 0 | ||   0   x  x  ...
    //    end_bits:  ...  x   x   x | 0 | ||   0   x  x  ...
    //                              +---+
    //
    // In the 64-bit VM, each bit represents one 64-bit word:
    //                              +------------+
    // b) beg_bits:  ...  x   x   x | 0   ||   0 | x  x  ...
    //    end_bits:  ...  x   x   1 | 0   ||   0 | x  x  ...
    //                              +------------+
    //                          +-------+
    // c) beg_bits:  ...  x   x | 0   0 | ||   0   x  x  ...
    //    end_bits:  ...  x   1 | 0   0 | ||   0   x  x  ...
    //                          +-------+
    //                      +-----------+
    // d) beg_bits:  ...  x | 0   0   0 | ||   0   x  x  ...
    //    end_bits:  ...  1 | 0   0   0 | ||   0   x  x  ...
    //                      +-----------+
    //                          +-------+
    // e) beg_bits:  ...  0   0 | 0   0 | ||   0   x  x  ...
    //    end_bits:  ...  0   0 | 0   0 | ||   0   x  x  ...
    //                          +-------+

    // Initially assume case a, c or e will apply.
    size_t obj_len = CollectedHeap::min_fill_size();
    HeapWord* obj_beg = dense_prefix_end - obj_len;

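    // Cases b and d require an adjustment below: in case b the filler object
    // spans the boundary, starting one word to its left; in case d exactly
    // the three dead words to the left of the boundary are filled.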
#ifdef  _LP64
    if (MinObjAlignment > 1) { // object alignment > heap word size
      // Cases a, c or e.
    } else if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
      // Case b above.
      obj_beg = dense_prefix_end - 1;
    } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
               _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
      // Case d above.
      obj_beg = dense_prefix_end - 3;
      obj_len = 3;
    }
#endif  // #ifdef _LP64

    CollectedHeap::fill_with_object(obj_beg, obj_len);
    _mark_bitmap.mark_obj(obj_beg, obj_len);
    _summary_data.add_obj(obj_beg, obj_len);
    assert(start_array(id) != NULL, "sanity");
    start_array(id)->allocate_block(obj_beg);
  }
}

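// Clear the source_region field of each region in [beg_addr, end_addr), with
// end_addr first aligned up to a region boundary.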
void
PSParallelCompact::clear_source_region(HeapWord* beg_addr, HeapWord* end_addr)
{
  RegionData* const beg_ptr = _summary_data.addr_to_region_ptr(beg_addr);
  HeapWord* const end_aligned_up = _summary_data.region_align_up(end_addr);
  RegionData* const end_ptr = _summary_data.addr_to_region_ptr(end_aligned_up);
  for (RegionData* cur = beg_ptr; cur < end_ptr; ++cur) {
    cur->set_source_region(0);
  }
}

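// Compute the dense prefix for a space and re-summarize the rest of the space
// so that objects beyond the prefix compact toward its end.  Under maximum
// compaction the quick summary data, which compacts everything, is kept
// as-is.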
1498 void
1499 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
1500 {
1501   assert(id < last_space_id, "id out of range");
1502   assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
1503          "should have been reset in summarize_spaces_quick()");
1504 
1505   const MutableSpace* space = _space_info[id].space();
1506   if (_space_info[id].new_top() != space->bottom()) {
1507     HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
1508     _space_info[id].set_dense_prefix(dense_prefix_end);
1509 
1510 #ifndef PRODUCT
1511     if (TraceParallelOldGCDensePrefix) {
1512       print_dense_prefix_stats("ratio", id, maximum_compaction,
1513                                dense_prefix_end);
1514       HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
1515       print_dense_prefix_stats("density", id, maximum_compaction, addr);
1516     }
1517 #endif  // #ifndef PRODUCT
1518 
1519     // Recompute the summary data, taking into account the dense prefix.  If
1520     // every last byte will be reclaimed, then the existing summary data which
1521     // compacts everything can be left in place.
1522     if (!maximum_compaction && dense_prefix_end != space->bottom()) {
1523       // If dead space crosses the dense prefix boundary, it is (at least
1524       // partially) filled with a dummy object, marked live and added to the
1525       // summary data.  This simplifies the copy/update phase and must be done
1526       // before the final locations of objects are determined, to prevent
1527       // leaving a fragment of dead space that is too small to fill.
1528       fill_dense_prefix_end(id);
1529 
1530       // Compute the destination of each Region, and thus each object.
1531       _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
1532       _summary_data.summarize(_space_info[id].split_info(),
1533                               dense_prefix_end, space->top(), NULL,
1534                               dense_prefix_end, space->end(),
1535                               _space_info[id].new_top_addr());
1536     }
1537   }
1538 
1539   if (log_develop_is_enabled(Trace, gc, compaction, phases)) {
1540     const size_t region_size = ParallelCompactData::RegionSize;
1541     HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
1542     const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
1543     const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
1544     HeapWord* const new_top = _space_info[id].new_top();
1545     const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
1546     const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
1547     log_develop_trace(gc, compaction, phases)(
1548         "id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
1549         "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
1550         "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
1551         id, space->capacity_in_words(), p2i(dense_prefix_end),
1552         dp_region, dp_words / region_size,
1553         cr_words / region_size, p2i(new_top));
1554   }
1555 }
1556 
1557 #ifndef PRODUCT
1558 void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
1559                                           HeapWord* dst_beg, HeapWord* dst_end,
1560                                           SpaceId src_space_id,
1561                                           HeapWord* src_beg, HeapWord* src_end)
1562 {
1563   log_develop_trace(gc, compaction, phases)(
1564       "Summarizing %d [%s] into %d [%s]:  "
1565       "src=" PTR_FORMAT "-" PTR_FORMAT " "
1566       SIZE_FORMAT "-" SIZE_FORMAT " "
1567       "dst=" PTR_FORMAT "-" PTR_FORMAT " "
1568       SIZE_FORMAT "-" SIZE_FORMAT,
1569       src_space_id, space_names[src_space_id],
1570       dst_space_id, space_names[dst_space_id],
1571       p2i(src_beg), p2i(src_end),
1572       _summary_data.addr_to_region_idx(src_beg),
1573       _summary_data.addr_to_region_idx(src_end),
1574       p2i(dst_beg), p2i(dst_end),
1575       _summary_data.addr_to_region_idx(dst_beg),
1576       _summary_data.addr_to_region_idx(dst_end));
1577 }
1578 #endif  // #ifndef PRODUCT
1579 
1580 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1581                                       bool maximum_compaction)
1582 {
1583   GCTraceTime(Trace, gc, phases) tm("Summary Phase", &_gc_timer);
1584 
1585 #ifdef  ASSERT
1586   if (TraceParallelOldGCMarkingPhase) {
1587     tty->print_cr("add_obj_count=" SIZE_FORMAT " "
1588                   "add_obj_bytes=" SIZE_FORMAT,
1589                   add_obj_count, add_obj_size * HeapWordSize);
1590     tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
1591                   "mark_bitmap_bytes=" SIZE_FORMAT,
1592                   mark_bitmap_count, mark_bitmap_size * HeapWordSize);
1593   }
1594 #endif  // #ifdef ASSERT
1595 
1596   // Quick summarization of each space into itself, to see how much is live.
1597   summarize_spaces_quick();
1598 
1599   log_develop_trace(gc, compaction, phases)("summary phase:  after summarizing each space to self");
1600   NOT_PRODUCT(print_region_ranges());
1601   NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1602 
1603   // The amount of live data that will end up in old space (assuming it fits).
1604   size_t old_space_total_live = 0;
1605   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1606     old_space_total_live += pointer_delta(_space_info[id].new_top(),
1607                                           _space_info[id].space()->bottom());
1608   }
1609 
1610   MutableSpace* const old_space = _space_info[old_space_id].space();
1611   const size_t old_capacity = old_space->capacity_in_words();
1612   if (old_space_total_live > old_capacity) {
1613     // XXX - should also try to expand
1614     maximum_compaction = true;
1615   }
1616 
1617   // Old generations.
1618   summarize_space(old_space_id, maximum_compaction);
1619 
1620   // Summarize the remaining spaces in the young gen.  The initial target space
1621   // is the old gen.  If a space does not fit entirely into the target, then the
1622   // remainder is compacted into the space itself and that space becomes the new
1623   // target.
1624   SpaceId dst_space_id = old_space_id;
1625   HeapWord* dst_space_end = old_space->end();
1626   HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
1627   for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
1628     const MutableSpace* space = _space_info[id].space();
1629     const size_t live = pointer_delta(_space_info[id].new_top(),
1630                                       space->bottom());
1631     const size_t available = pointer_delta(dst_space_end, *new_top_addr);
1632 
1633     NOT_PRODUCT(summary_phase_msg(dst_space_id, *new_top_addr, dst_space_end,
1634                                   SpaceId(id), space->bottom(), space->top());)
1635     if (live > 0 && live <= available) {
1636       // All the live data will fit.
1637       bool done = _summary_data.summarize(_space_info[id].split_info(),
1638                                           space->bottom(), space->top(),
1639                                           NULL,
1640                                           *new_top_addr, dst_space_end,
1641                                           new_top_addr);
1642       assert(done, "space must fit into old gen");
1643 
1644       // Reset the new_top value for the space.
1645       _space_info[id].set_new_top(space->bottom());
1646     } else if (live > 0) {
1647       // Attempt to fit part of the source space into the target space.
1648       HeapWord* next_src_addr = NULL;
1649       bool done = _summary_data.summarize(_space_info[id].split_info(),
1650                                           space->bottom(), space->top(),
1651                                           &next_src_addr,
1652                                           *new_top_addr, dst_space_end,
1653                                           new_top_addr);
1654       assert(!done, "space should not fit into old gen");
1655       assert(next_src_addr != NULL, "sanity");
1656 
1657       // The source space becomes the new target, so the remainder is compacted
1658       // within the space itself.
1659       dst_space_id = SpaceId(id);
1660       dst_space_end = space->end();
1661       new_top_addr = _space_info[id].new_top_addr();
1662       NOT_PRODUCT(summary_phase_msg(dst_space_id,
1663                                     space->bottom(), dst_space_end,
1664                                     SpaceId(id), next_src_addr, space->top());)
1665       done = _summary_data.summarize(_space_info[id].split_info(),
1666                                      next_src_addr, space->top(),
1667                                      NULL,
1668                                      space->bottom(), dst_space_end,
1669                                      new_top_addr);
1670       assert(done, "space must fit when compacted into itself");
1671       assert(*new_top_addr <= space->top(), "usage should not grow");
1672     }
1673   }
1674 
1675   log_develop_trace(gc, compaction, phases)("Summary_phase:  after final summarization");
1676   NOT_PRODUCT(print_region_ranges());
1677   NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1678 }
1679 
// This method should contain all heap-specific policy for invoking a full
// collection.  invoke_no_policy() will only attempt to compact the heap; it
// will do nothing further.  If we need to bail out for policy reasons, scavenge
// before the full gc, or perform any other specialized behavior, it needs to
// be added here.
//
// Note that this method should only be called from the vm_thread while at a
// safepoint.
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity.  For example, when heap space is tight and full measures
// are being taken to free space.
void PSParallelCompact::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(),
         "should be in vm thread");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(!heap->is_gc_active(), "not reentrant");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

  PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
                                      maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GCLocker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  GCIdMark gc_id_mark;
  _gc_timer.register_gc_start();
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  TimeStamp marking_start;
  TimeStamp compaction_start;
  TimeStamp collection_exit;

  GCCause::Cause gc_cause = heap->gc_cause();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(maximum_heap_compaction,
                          heap->collector_policy());

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  // Make sure data structures are sane, make the heap parsable, and do other
  // miscellaneous bookkeeping.
  pre_compact();

  PreGCValues pre_gc_values(heap);

  // Get the compaction manager reserved for the VM thread.
  ParCompactionManager* const vmthread_cm =
    ParCompactionManager::manager_array(gc_task_manager()->workers());

  {
    ResourceMark rm;
    HandleMark hm;

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();

    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause, true);

    heap->pre_full_gc_dump(&_gc_timer);

    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceOldGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(maximum_heap_compaction);

    bool marked_for_unloading = false;

    marking_start.update();
    marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);

    bool max_on_system_gc = UseMaximumCompactionOnSystemGC
      && GCCause::is_user_requested_gc(gc_cause);
    summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);

#if defined(COMPILER2) || INCLUDE_JVMCI
    assert(DerivedPointerTable::is_active(), "Sanity");
    DerivedPointerTable::set_active(false);
#endif

    // adjust_roots() updates Universe::_intArrayKlassObj which is
    // needed by the compaction for filling holes in the dense prefix.
    adjust_roots(vmthread_cm);

    compaction_start.update();
    compact();

    // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
    // done before resizing.
    post_compact();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {
      log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
      log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
                          old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(
          size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }

      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
    }

    if (UsePerfData) {
      PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
      counters->update_counters();
      counters->update_old_capacity(old_gen->capacity_in_bytes());
      counters->update_young_capacity(young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // Resize the metaspace capacity after a collection
    MetaspaceGC::compute_new_size();

    if (TraceOldGenTime) {
      accumulated_time()->stop();
    }

    young_gen->print_used_change(pre_gc_values.young_gen_used());
    old_gen->print_used_change(pre_gc_values.old_gen_used());
    MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
    gc_task_manager()->release_idle_workers();

    heap->post_full_gc_dump(&_gc_timer);
  }

#ifdef ASSERT
  for (size_t i = 0; i < ParallelGCThreads + 1; ++i) {
    ParCompactionManager* const cm =
      ParCompactionManager::manager_array(int(i));
    assert(cm->marking_stack()->is_empty(),       "should be empty");
    assert(ParCompactionManager::region_list(int(i))->is_empty(), "should be empty");
  }
#endif // ASSERT

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("After GC");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  collection_exit.update();

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
                         marking_start.ticks(), compaction_start.ticks(),
                         collection_exit.ticks());
  gc_task_manager()->print_task_time_stamps();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer.register_gc_end();

  _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return true;
}

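// Try to move the boundary between the generations up so that eden's live
// data is absorbed into the old gen in place instead of being copied.
// Returns false if the boundary cannot or should not be moved: adaptive GC
// boundaries are disabled, a generation is not fully committed, or the
// resulting sizes would be out of range.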
bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                                   PSYoungGen* young_gen,
                                                   PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

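  // Purely illustrative numbers: with eden_used = 60M, promoted = 4M and a
  // 64K alignment, absorb_size = align_size_up(64M, 64K) = 64M; the resize
  // is attempted only if eden's capacity exceeds that amount.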
  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  log_trace(heap, ergo)(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

2046 
2047 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2048   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2049     "shouldn't return NULL");
2050   return ParallelScavengeHeap::gc_task_manager();
2051 }
2052 
2053 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2054                                       bool maximum_heap_compaction,
2055                                       ParallelOldTracer *gc_tracer) {
2056   // Recursively traverse all live objects and mark them
2057   GCTraceTime(Trace, gc, phases) tm("Marking Phase", &_gc_timer);
2058 
2059   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2060   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2061   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2062   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2063   ParallelTaskTerminator terminator(active_gc_threads, qset);
2064 
2065   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
2066   ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
2067 
2068   // Need new claim bits before marking starts.
2069   ClassLoaderDataGraph::clear_claimed_marks();
2070 
2071   {
2072     GCTraceTime(Trace, gc, phases) tm("Par Mark", &_gc_timer);
2073 
2074     ParallelScavengeHeap::ParStrongRootsScope psrs;
2075 
2076     GCTaskQueue* q = GCTaskQueue::create();
2077 
2078     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2079     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2080     // We scan the thread roots in parallel
2081     Threads::create_thread_roots_marking_tasks(q);
2082     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2083     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2084     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2085     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2086     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
2087     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2088     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
2089 
2090     if (active_gc_threads > 1) {
2091       for (uint j = 0; j < active_gc_threads; j++) {
2092         q->enqueue(new StealMarkingTask(&terminator));
2093       }
2094     }
2095 
2096     gc_task_manager()->execute_and_wait(q);
2097   }
2098 
2099   // Process reference objects found during marking
2100   {
2101     GCTraceTime(Trace, gc, phases) tm("Reference Processing", &_gc_timer);
2102 
2103     ReferenceProcessorStats stats;
2104     if (ref_processor()->processing_is_mt()) {
2105       RefProcTaskExecutor task_executor;
2106       stats = ref_processor()->process_discovered_references(
2107         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
2108         &task_executor, &_gc_timer);
2109     } else {
2110       stats = ref_processor()->process_discovered_references(
2111         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
2112         &_gc_timer);
2113     }
2114 
2115     gc_tracer->report_gc_reference_stats(stats);
2116   }
2117 
2118   GCTraceTime(Trace, gc) tm_m("Class Unloading", &_gc_timer);
2119 
2120   // This is the point where the entire marking should have completed.
2121   assert(cm->marking_stacks_empty(), "Marking should have completed");
2122 
2123   // Follow system dictionary roots and unload classes.
2124   bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
2125 
2126   // Unload nmethods.
2127   CodeCache::do_unloading(is_alive_closure(), purged_class);
2128 
2129   // Prune dead klasses from subklass/sibling/implementor lists.
2130   Klass::clean_weak_klass_links(is_alive_closure());
2131 
2132   // Delete entries for dead interned strings.
2133   StringTable::unlink(is_alive_closure());
2134 
2135   // Clean up unreferenced symbols in symbol table.
2136   SymbolTable::unlink();
2137   _gc_tracer.report_object_count_after_gc(is_alive_closure());
2138 }
2139 
2140 // This should be moved to the shared markSweep code!
2141 class PSAlwaysTrueClosure: public BoolObjectClosure {
2142 public:
2143   bool do_object_b(oop p) { return true; }
2144 };
2145 static PSAlwaysTrueClosure always_true;
2146 
2147 void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
2148   // Adjust the pointers to reflect the new locations
2149   GCTraceTime(Trace, gc, phases) tm("Adjust Roots", &_gc_timer);
2150 
2151   // Need new claim bits when tracing through and adjusting pointers.
2152   ClassLoaderDataGraph::clear_claimed_marks();
2153 
2154   PSParallelCompact::AdjustPointerClosure oop_closure(cm);
2155   PSParallelCompact::AdjustKlassClosure klass_closure(cm);
2156 
2157   // General strong roots.
2158   Universe::oops_do(&oop_closure);
2159   JNIHandles::oops_do(&oop_closure);   // Global (strong) JNI handles
2160   CLDToOopClosure adjust_from_cld(&oop_closure);
2161   Threads::oops_do(&oop_closure, &adjust_from_cld, NULL);
2162   ObjectSynchronizer::oops_do(&oop_closure);
2163   FlatProfiler::oops_do(&oop_closure);
2164   Management::oops_do(&oop_closure);
2165   JvmtiExport::oops_do(&oop_closure);
2166   SystemDictionary::oops_do(&oop_closure);
2167   ClassLoaderDataGraph::oops_do(&oop_closure, &klass_closure, true);
2168 
2169   // Now adjust pointers in remaining weak roots.  (All of which should
2170   // have been cleared if they pointed to non-surviving objects.)
2171   // Global (weak) JNI handles
2172   JNIHandles::weak_oops_do(&always_true, &oop_closure);
2173 
2174   CodeBlobToOopClosure adjust_from_blobs(&oop_closure, CodeBlobToOopClosure::FixRelocations);
2175   CodeCache::blobs_do(&adjust_from_blobs);
2176   StringTable::oops_do(&oop_closure);
2177   ref_processor()->weak_oops_do(&oop_closure);
2178   // Roots were visited so references into the young gen in roots
2179   // may have been scanned.  Process them also.
2180   // Should the reference processor have a span that excludes
2181   // young gen objects?
2182   PSScavenge::reference_processor()->weak_oops_do(&oop_closure);
2183 }
2184 
2185 // Helper class to print 8 region numbers per line and then print the total at the end.
2186 class FillableRegionLogger : public StackObj {
2187 private:
2188   LogHandle(gc, compaction) log;
2189   static const int LineLength = 8;
2190   size_t _regions[LineLength];
2191   int _next_index;
2192   bool _enabled;
2193   size_t _total_regions;
2194 public:
2195   FillableRegionLogger() : _next_index(0), _total_regions(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)) { }
2196   ~FillableRegionLogger() {
2197     log.trace(SIZE_FORMAT " initially fillable regions", _total_regions);
2198   }
2199 
2200   void print_line() {
2201     if (!_enabled || _next_index == 0) {
2202       return;
2203     }
2204     FormatBuffer<> line("Fillable: ");
2205     for (int i = 0; i < _next_index; i++) {
2206       line.append(" " SIZE_FORMAT_W(7), _regions[i]);
2207     }
2208     log.trace("%s", line.buffer());
2209     _next_index = 0;
2210   }
2211 
2212   void handle(size_t region) {
2213     if (!_enabled) {
2214       return;
2215     }
2216     _regions[_next_index++] = region;
2217     if (_next_index == LineLength) {
2218       print_line();
2219     }
2220     _total_regions++;
2221   }
2222 };
2223 
2224 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
2225                                                       uint parallel_gc_threads)
2226 {
2227   GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);
2228 
2229   // Find the threads that are active
2230   unsigned int which = 0;
2231 
2232   const uint task_count = MAX2(parallel_gc_threads, 1U);
2233   for (uint j = 0; j < task_count; j++) {
2234     q->enqueue(new DrainStacksCompactionTask(j));
2235     ParCompactionManager::verify_region_list_empty(j);
    // Set the region stack fields to 'no region stack' values so that, in
    // the stealing tasks, threads will be recognized as needing a region
    // stack if they did not acquire one by executing a draining task.
    ParCompactionManager* cm = ParCompactionManager::manager_array(j);
    cm->set_region_stack(NULL);
    cm->set_region_stack_index((uint)max_uintx);
  }
  ParCompactionManager::reset_recycled_stack_index();

  // Find all regions that are available (can be filled immediately) and
  // distribute them to the thread stacks.  The iteration is done in reverse
  // order (high to low) so the regions will be removed in ascending order.

  const ParallelCompactData& sd = PSParallelCompact::summary_data();

  // An index corresponding to the tasks created above;
  // "which" must satisfy 0 <= which < task_count.

  which = 0;
  // id + 1 is used to test for termination so that an unsigned type can be
  // used even though old_space_id == 0.
  FillableRegionLogger region_logger;
  for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
    SpaceInfo* const space_info = _space_info + id;
    MutableSpace* const space = space_info->space();
    HeapWord* const new_top = space_info->new_top();

    const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
    const size_t end_region =
      sd.addr_to_region_idx(sd.region_align_up(new_top));

    for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
      if (sd.region(cur)->claim_unsafe()) {
        ParCompactionManager::region_list_push(which, cur);
        region_logger.handle(cur);
        // Assign regions to tasks in round-robin fashion.
        if (++which == task_count) {
          assert(which <= parallel_gc_threads,
            "Inconsistent number of workers");
          which = 0;
        }
      }
    }
    region_logger.print_line();
  }
}

#define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4

void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                                   uint parallel_gc_threads) {
  GCTraceTime(Trace, gc, phases) tm("Dense Prefix Task Setup", &_gc_timer);

  ParallelCompactData& sd = PSParallelCompact::summary_data();

  // Iterate over all the spaces adding tasks for updating
  // regions in the dense prefix.  Assume that 1 gc thread
  // will work on opening the gaps and the remaining gc threads
  // will work on the dense prefix.
  unsigned int space_id;
  for (space_id = old_space_id; space_id < last_space_id; ++space_id) {
    HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
    const MutableSpace* const space = _space_info[space_id].space();

    if (dense_prefix_end == space->bottom()) {
      // There is no dense prefix for this space.
      continue;
    }

    // The dense prefix is before this region.
    size_t region_index_end_dense_prefix =
        sd.addr_to_region_idx(dense_prefix_end);
    RegionData* const dense_prefix_cp =
      sd.region(region_index_end_dense_prefix);
    assert(dense_prefix_end == space->end() ||
           dense_prefix_cp->available() ||
           dense_prefix_cp->claimed(),
           "The region after the dense prefix should always be ready to fill");

    size_t region_index_start = sd.addr_to_region_idx(space->bottom());

    // Is there dense prefix work?
    size_t total_dense_prefix_regions =
      region_index_end_dense_prefix - region_index_start;
    // How many regions of the dense prefix should be given to
    // each thread?
    if (total_dense_prefix_regions > 0) {
      uint tasks_for_dense_prefix = 1;
      if (total_dense_prefix_regions <=
          (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
        // Don't over-partition.  This assumes that
        // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
        // so there are not many regions to process.
        tasks_for_dense_prefix = parallel_gc_threads;
      } else {
        // Over-partition.
        tasks_for_dense_prefix = parallel_gc_threads *
          PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
      }
      size_t regions_per_thread = total_dense_prefix_regions /
        tasks_for_dense_prefix;
      // Give each thread at least 1 region.
      if (regions_per_thread == 0) {
        regions_per_thread = 1;
      }

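      // Illustrative numbers: 4 GC threads, the over-partitioning factor of
      // 4 and 100 dense prefix regions give 16 tasks of 6 regions each; the
      // leftover 4 regions are handled by the tail task enqueued after the
      // loop.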
      for (uint k = 0; k < tasks_for_dense_prefix; k++) {
        if (region_index_start >= region_index_end_dense_prefix) {
          break;
        }
        // region_index_end is not processed
        size_t region_index_end = MIN2(region_index_start + regions_per_thread,
                                       region_index_end_dense_prefix);
        q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
                                             region_index_start,
                                             region_index_end));
        region_index_start = region_index_end;
      }
    }
    // This gets any part of the dense prefix that did not
    // fit evenly.
    if (region_index_start < region_index_end_dense_prefix) {
      q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
                                           region_index_start,
                                           region_index_end_dense_prefix));
    }
  }
}

void PSParallelCompact::enqueue_region_stealing_tasks(
                                     GCTaskQueue* q,
                                     ParallelTaskTerminator* terminator_ptr,
                                     uint parallel_gc_threads) {
  GCTraceTime(Trace, gc, phases) tm("Steal Task Setup", &_gc_timer);

  // Once a thread has drained its stack, it should try to steal regions from
  // other threads.
  if (parallel_gc_threads > 1) {
    for (uint j = 0; j < parallel_gc_threads; j++) {
      q->enqueue(new StealRegionCompactionTask(terminator_ptr));
    }
  }
}

#ifdef ASSERT
// Write a histogram of the number of times the block table was filled for a
// region.
void PSParallelCompact::write_block_fill_histogram()
{
  if (!log_develop_is_enabled(Trace, gc, compaction)) {
    return;
  }

  LogHandle(gc, compaction) log;
  ResourceMark rm;
  outputStream* out = log.trace_stream();

  typedef ParallelCompactData::RegionData rd_t;
  ParallelCompactData& sd = summary_data();

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    MutableSpace* const spc = _space_info[id].space();
    if (spc->bottom() != spc->top()) {
      const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
      HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
      const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);

      size_t histo[5] = { 0, 0, 0, 0, 0 };
      const size_t histo_len = sizeof(histo) / sizeof(size_t);
      const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));

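      // blocks_filled_count() values of histo_len - 1 (i.e., 4) or more all
      // land in the last bucket.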
      for (const rd_t* cur = beg; cur < end; ++cur) {
        ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
      }
      out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
      for (size_t i = 0; i < histo_len; ++i) {
        out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
                   histo[i], 100.0 * histo[i] / region_cnt);
      }
      out->cr();
    }
  }
}
#endif // #ifdef ASSERT

void PSParallelCompact::compact() {
  GCTraceTime(Trace, gc, phases) tm("Compaction Phase", &_gc_timer);

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSOldGen* old_gen = heap->old_gen();
  old_gen->start_array()->reset();
  uint parallel_gc_threads = heap->gc_task_manager()->workers();
  uint active_gc_threads = heap->gc_task_manager()->active_workers();
  TaskQueueSetSuper* qset = ParCompactionManager::region_array();
  ParallelTaskTerminator terminator(active_gc_threads, qset);

  GCTaskQueue* q = GCTaskQueue::create();
  enqueue_region_draining_tasks(q, active_gc_threads);
  enqueue_dense_prefix_tasks(q, active_gc_threads);
  enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);

  {
    GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);

    gc_task_manager()->execute_and_wait(q);

#ifdef  ASSERT
    // Verify that all regions have been processed before the deferred updates.
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      verify_complete(SpaceId(id));
    }
#endif
  }

  {
    // Update the deferred objects, if any.  Any compaction manager can be used.
    GCTraceTime(Trace, gc, phases) tm("Deferred Updates", &_gc_timer);
    ParCompactionManager* cm = ParCompactionManager::manager_array(0);
    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
      update_deferred_objects(cm, SpaceId(id));
    }
  }

  DEBUG_ONLY(write_block_fill_histogram());
}

#ifdef  ASSERT
void PSParallelCompact::verify_complete(SpaceId space_id) {
  // All regions between the space's bottom() and new_top() should be marked
  // as filled, and all regions between new_top() and top() should be
  // available (i.e., should have been emptied).
  ParallelCompactData& sd = summary_data();
  SpaceInfo si = _space_info[space_id];
  HeapWord* new_top_addr = sd.region_align_up(si.new_top());
  HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
  const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
  const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
  const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);

  bool issued_a_warning = false;

  size_t cur_region;
  for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
    const RegionData* const c = sd.region(cur_region);
    if (!c->completed()) {
      warning("region " SIZE_FORMAT " not filled:  "
              "destination_count=%u",
              cur_region, c->destination_count());
      issued_a_warning = true;
    }
  }

  for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
    const RegionData* const c = sd.region(cur_region);
    if (!c->available()) {
      warning("region " SIZE_FORMAT " not empty:   "
              "destination_count=%u",
              cur_region, c->destination_count());
      issued_a_warning = true;
    }
  }

  if (issued_a_warning) {
    print_region_ranges();
  }
}
#endif  // #ifdef ASSERT

inline void UpdateOnlyClosure::do_addr(HeapWord* addr) {
  _start_array->allocate_block(addr);
  compaction_manager()->update_contents(oop(addr));
}

// Update interior oops in the range of regions [beg_region, end_region).
2511 void
2512 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
2513                                                        SpaceId space_id,
2514                                                        size_t beg_region,
2515                                                        size_t end_region) {
2516   ParallelCompactData& sd = summary_data();
2517   ParMarkBitMap* const mbm = mark_bitmap();
2518 
2519   HeapWord* beg_addr = sd.region_to_addr(beg_region);
2520   HeapWord* const end_addr = sd.region_to_addr(end_region);
2521   assert(beg_region <= end_region, "bad region range");
2522   assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
2523 
2524 #ifdef  ASSERT
2525   // Claim the regions to avoid triggering an assert when they are marked as
2526   // filled.
2527   for (size_t claim_region = beg_region; claim_region < end_region; ++claim_region) {
2528     assert(sd.region(claim_region)->claim_unsafe(), "claim() failed");
2529   }
2530 #endif  // #ifdef ASSERT
2531 
2532   if (beg_addr != space(space_id)->bottom()) {
2533     // Find the first live object or block of dead space that *starts* in this
2534     // range of regions.  If a partial object crosses onto the region, skip it;
2535     // it will be marked for 'deferred update' when the object head is
2536     // processed.  If dead space crosses onto the region, it is also skipped; it
2537     // will be filled when the prior region is processed.  If neither of those
2538     // apply, the first word in the region is the start of a live object or dead
2539     // space.
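         //
         // For example (an illustrative case, not taken from a real heap): if
         // an object starting in a prior region extends 10 words onto
         // beg_region, partial_obj_end() advances beg_addr past those 10 words
         // to the first word that can start a new object or dead-space block.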
2540     assert(beg_addr > space(space_id)->bottom(), "sanity");
2541     const RegionData* const cp = sd.region(beg_region);
2542     if (cp->partial_obj_size() != 0) {
2543       beg_addr = sd.partial_obj_end(beg_region);
2544     } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
2545       beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
2546     }
2547   }
2548 
2549   if (beg_addr < end_addr) {
2550     // A live object or block of dead space starts in this range of regions.
2551     HeapWord* const dense_prefix_end = dense_prefix(space_id);
2552 
2553     // Create closures and iterate.
2554     UpdateOnlyClosure update_closure(mbm, cm, space_id);
2555     FillClosure fill_closure(cm, space_id);
2556     ParMarkBitMap::IterationStatus status;
2557     status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
2558                           dense_prefix_end);
2559     if (status == ParMarkBitMap::incomplete) {
2560       update_closure.do_addr(update_closure.source());
2561     }
2562   }
2563 
2564   // Mark the regions as filled.
2565   RegionData* const beg_cp = sd.region(beg_region);
2566   RegionData* const end_cp = sd.region(end_region);
2567   for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
2568     cp->set_completed();
2569   }
2570 }
2571 
2572 // Return the SpaceId for the space containing addr.  If addr is not in the
2573 // heap, last_space_id is returned.  Debug builds assert that the address is
2574 // in the heap.
2575 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
2576   assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
2577 
2578   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2579     if (_space_info[id].space()->contains(addr)) {
2580       return SpaceId(id);
2581     }
2582   }
2583 
2584   assert(false, "no space contains the addr");
2585   return last_space_id;
2586 }
2587 
2588 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
2589                                                 SpaceId id) {
2590   assert(id < last_space_id, "bad space id");
2591 
2592   ParallelCompactData& sd = summary_data();
2593   const SpaceInfo* const space_info = _space_info + id;
2594   ObjectStartArray* const start_array = space_info->start_array();
2595 
2596   const MutableSpace* const space = space_info->space();
2597   assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
2598   HeapWord* const beg_addr = space_info->dense_prefix();
2599   HeapWord* const end_addr = sd.region_align_up(space_info->new_top());
2600 
2601   const RegionData* const beg_region = sd.addr_to_region_ptr(beg_addr);
2602   const RegionData* const end_region = sd.addr_to_region_ptr(end_addr);
2603   const RegionData* cur_region;
2604   for (cur_region = beg_region; cur_region < end_region; ++cur_region) {
2605     HeapWord* const addr = cur_region->deferred_obj_addr();
2606     if (addr != NULL) {
2607       if (start_array != NULL) {
2608         start_array->allocate_block(addr);
2609       }
2610       cm->update_contents(oop(addr));
2611       assert(oop(addr)->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(oop(addr)));
2612     }
2613   }
2614 }
2615 
2616 // Skip over count live words starting from beg, and return the address of the
2617 // next live word.  Unless marked, the word corresponding to beg is assumed to
2618 // be dead.  Callers must either ensure beg does not correspond to the middle of
2619 // an object, or account for those live words in some other way.  Callers must
2620 // also ensure that there are enough live words in the range [beg, end) to skip.
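     //
     // A worked example with made-up bit positions (one bit covers
     // MinObjAlignment words): if live objects are marked at bits [8, 11] and
     // [20, 27], skipping 6 bits' worth of words consumes the 4 bits of the
     // first object and returns the address of bit 20 + 2 = 22, inside the
     // second object.  Skipping exactly 4 bits' worth lands just past the
     // first object, so the loop below exits and find_obj_beg() returns the
     // address of bit 20.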
2621 HeapWord*
2622 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
2623 {
2624   assert(count > 0, "sanity");
2625 
2626   ParMarkBitMap* m = mark_bitmap();
2627   idx_t bits_to_skip = m->words_to_bits(count);
2628   idx_t cur_beg = m->addr_to_bit(beg);
2629   const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end));
2630 
2631   do {
2632     cur_beg = m->find_obj_beg(cur_beg, search_end);
2633     idx_t cur_end = m->find_obj_end(cur_beg, search_end);
2634     const size_t obj_bits = cur_end - cur_beg + 1;
2635     if (obj_bits > bits_to_skip) {
2636       return m->bit_to_addr(cur_beg + bits_to_skip);
2637     }
2638     bits_to_skip -= obj_bits;
2639     cur_beg = cur_end + 1;
2640   } while (bits_to_skip > 0);
2641 
2642   // Skipping the desired number of words landed just past the end of an object.
2643   // Find the start of the next object.
2644   cur_beg = m->find_obj_beg(cur_beg, search_end);
2645   assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
2646   return m->bit_to_addr(cur_beg);
2647 }
2648 
2649 HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
2650                                             SpaceId src_space_id,
2651                                             size_t src_region_idx)
2652 {
2653   assert(summary_data().is_region_aligned(dest_addr), "not aligned");
2654 
2655   const SplitInfo& split_info = _space_info[src_space_id].split_info();
2656   if (split_info.dest_region_addr() == dest_addr) {
2657     // The partial object ending at the split point contains the first word to
2658     // be copied to dest_addr.
2659     return split_info.first_src_addr();
2660   }
2661 
2662   const ParallelCompactData& sd = summary_data();
2663   ParMarkBitMap* const bitmap = mark_bitmap();
2664   const size_t RegionSize = ParallelCompactData::RegionSize;
2665 
2666   assert(sd.is_region_aligned(dest_addr), "not aligned");
2667   const RegionData* const src_region_ptr = sd.region(src_region_idx);
2668   const size_t partial_obj_size = src_region_ptr->partial_obj_size();
2669   HeapWord* const src_region_destination = src_region_ptr->destination();
2670 
2671   assert(dest_addr >= src_region_destination, "wrong src region");
2672   assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
2673 
2674   HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
2675   HeapWord* const src_region_end = src_region_beg + RegionSize;
2676 
2677   HeapWord* addr = src_region_beg;
2678   if (dest_addr == src_region_destination) {
2679     // Return the first live word in the source region.
2680     if (partial_obj_size == 0) {
2681       addr = bitmap->find_obj_beg(addr, src_region_end);
2682       assert(addr < src_region_end, "no objects start in src region");
2683     }
2684     return addr;
2685   }
2686 
2687   // Must skip some live data.
2688   size_t words_to_skip = dest_addr - src_region_destination;
2689   assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
2690 
2691   if (partial_obj_size >= words_to_skip) {
2692     // All the live words to skip are part of the partial object.
2693     addr += words_to_skip;
2694     if (partial_obj_size == words_to_skip) {
2695       // Find the first live word past the partial object.
2696       addr = bitmap->find_obj_beg(addr, src_region_end);
2697       assert(addr < src_region_end, "wrong src region");
2698     }
2699     return addr;
2700   }
2701 
2702   // Skip over the partial object (if any).
2703   if (partial_obj_size != 0) {
2704     words_to_skip -= partial_obj_size;
2705     addr += partial_obj_size;
2706   }
2707 
2708   // Skip over live words due to objects that start in the region.
2709   addr = skip_live_words(addr, src_region_end, words_to_skip);
2710   assert(addr < src_region_end, "wrong src region");
2711   return addr;
2712 }
2713 
2714 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
2715                                                      SpaceId src_space_id,
2716                                                      size_t beg_region,
2717                                                      HeapWord* end_addr)
2718 {
2719   ParallelCompactData& sd = summary_data();
2720 
2721 #ifdef ASSERT
2722   MutableSpace* const src_space = _space_info[src_space_id].space();
2723   HeapWord* const beg_addr = sd.region_to_addr(beg_region);
2724   assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
2725          "src_space_id does not match beg_addr");
2726   assert(src_space->contains(end_addr) || end_addr == src_space->end(),
2727          "src_space_id does not match end_addr");
2728 #endif // #ifdef ASSERT
2729 
2730   RegionData* const beg = sd.region(beg_region);
2731   RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
2732 
2733   // Regions up to new_top() are enqueued if they become available.
2734   HeapWord* const new_top = _space_info[src_space_id].new_top();
2735   RegionData* const enqueue_end =
2736     sd.addr_to_region_ptr(sd.region_align_up(new_top));
2737 
2738   for (RegionData* cur = beg; cur < end; ++cur) {
2739     assert(cur->data_size() > 0, "region must have live data");
2740     cur->decrement_destination_count();
2741     if (cur < enqueue_end && cur->available() && cur->claim()) {
2742       cm->push_region(sd.region(cur));
2743     }
2744   }
2745 }
2746 
2747 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
2748                                           SpaceId& src_space_id,
2749                                           HeapWord*& src_space_top,
2750                                           HeapWord* end_addr)
2751 {
2752   typedef ParallelCompactData::RegionData RegionData;
2753 
2754   ParallelCompactData& sd = PSParallelCompact::summary_data();
2755   const size_t region_size = ParallelCompactData::RegionSize;
2756 
2757   size_t src_region_idx = 0;
2758 
2759   // Skip empty regions (if any) up to the top of the space.
2760   HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
2761   RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
2762   HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
2763   const RegionData* const top_region_ptr =
2764     sd.addr_to_region_ptr(top_aligned_up);
2765   while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
2766     ++src_region_ptr;
2767   }
2768 
2769   if (src_region_ptr < top_region_ptr) {
2770     // The next source region is in the current space.  Update src_region_idx
2771     // and the source address to match src_region_ptr.
2772     src_region_idx = sd.region(src_region_ptr);
2773     HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
2774     if (src_region_addr > closure.source()) {
2775       closure.set_source(src_region_addr);
2776     }
2777     return src_region_idx;
2778   }
2779 
2780   // Switch to a new source space and find the first non-empty region.
2781   unsigned int space_id = src_space_id + 1;
2782   assert(space_id < last_space_id, "not enough spaces");
2783 
2784   HeapWord* const destination = closure.destination();
2785 
2786   do {
2787     MutableSpace* space = _space_info[space_id].space();
2788     HeapWord* const bottom = space->bottom();
2789     const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);
2790 
2791     // Iterate over the spaces that do not compact into themselves.
2792     if (bottom_cp->destination() != bottom) {
2793       HeapWord* const top_aligned_up = sd.region_align_up(space->top());
2794       const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
2795 
2796       for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
2797         if (src_cp->live_obj_size() > 0) {
2798           // Found it.
2799           assert(src_cp->destination() == destination,
2800                  "first live obj in the space must match the destination");
2801           assert(src_cp->partial_obj_size() == 0,
2802                  "a space cannot begin with a partial obj");
2803 
2804           src_space_id = SpaceId(space_id);
2805           src_space_top = space->top();
2806           const size_t src_region_idx = sd.region(src_cp);
2807           closure.set_source(sd.region_to_addr(src_region_idx));
2808           return src_region_idx;
2809         } else {
2810           assert(src_cp->data_size() == 0, "sanity");
2811         }
2812       }
2813     }
2814   } while (++space_id < last_space_id);
2815 
2816   assert(false, "no source region was found");
2817   return 0;
2818 }
2819 
2820 void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
2821 {
2822   typedef ParMarkBitMap::IterationStatus IterationStatus;
2823   const size_t RegionSize = ParallelCompactData::RegionSize;
2824   ParMarkBitMap* const bitmap = mark_bitmap();
2825   ParallelCompactData& sd = summary_data();
2826   RegionData* const region_ptr = sd.region(region_idx);
2827 
2828   // Get the items needed to construct the closure.
2829   HeapWord* dest_addr = sd.region_to_addr(region_idx);
2830   SpaceId dest_space_id = space_id(dest_addr);
2831   ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
2832   HeapWord* new_top = _space_info[dest_space_id].new_top();
2833   assert(dest_addr < new_top, "sanity");
2834   const size_t words = MIN2(pointer_delta(new_top, dest_addr), RegionSize);
2835 
2836   // Get the source region and related info.
2837   size_t src_region_idx = region_ptr->source_region();
2838   SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2839   HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2840 
2841   MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
2842   closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2843 
2844   // Adjust src_region_idx to prepare for decrementing destination counts (the
2845   // destination count is not decremented when a region is copied to itself).
2846   if (src_region_idx == region_idx) {
2847     src_region_idx += 1;
2848   }
2849 
2850   if (bitmap->is_unmarked(closure.source())) {
2851     // The first source word is in the middle of an object; copy the remainder
2852     // of the object or as much as will fit.  The fact that pointer updates were
2853     // deferred will be noted when the object header is processed.
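         // (Illustrative example with invented sizes: if source() points at
         // word 300 of a 500-word object, copy_partial_obj() copies the
         // remaining 200 words, or fewer if the destination region fills up
         // first.)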
2854     HeapWord* const old_src_addr = closure.source();
2855     closure.copy_partial_obj();
2856     if (closure.is_full()) {
2857       decrement_destination_counts(cm, src_space_id, src_region_idx,
2858                                    closure.source());
2859       region_ptr->set_deferred_obj_addr(NULL);
2860       region_ptr->set_completed();
2861       return;
2862     }
2863 
2864     HeapWord* const end_addr = sd.region_align_down(closure.source());
2865     if (sd.region_align_down(old_src_addr) != end_addr) {
2866       // The partial object was copied from more than one source region.
2867       decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2868 
2869       // Move to the next source region, possibly switching spaces as well.  All
2870       // args except end_addr may be modified.
2871       src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2872                                        end_addr);
2873     }
2874   }
2875 
2876   do {
2877     HeapWord* const cur_addr = closure.source();
2878     HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2879                                     src_space_top);
2880     IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
2881 
2882     if (status == ParMarkBitMap::incomplete) {
2883       // The last obj that starts in the source region does not end in the
2884       // region.
2885       assert(closure.source() < end_addr, "sanity");
2886       HeapWord* const obj_beg = closure.source();
2887       HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
2888                                        src_space_top);
2889       HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
2890       if (obj_end < range_end) {
2891         // The end was found; the entire object will fit.
2892         status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
2893         assert(status != ParMarkBitMap::would_overflow, "sanity");
2894       } else {
2895         // The end was not found; the object will not fit.
2896         assert(range_end < src_space_top, "obj cannot cross space boundary");
2897         status = ParMarkBitMap::would_overflow;
2898       }
2899     }
2900 
2901     if (status == ParMarkBitMap::would_overflow) {
2902       // The last object did not fit.  Record that its interior oop updates
2903       // were deferred, then copy enough of the object to fill the region.
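           // (Illustrative: if 40 words remain in the destination region but
           // the object is 100 words long, only the first 40 words are copied
           // now; set_deferred_obj_addr() records where the object will start
           // so update_deferred_objects() can update its interior oops later.)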
2904       region_ptr->set_deferred_obj_addr(closure.destination());
2905       status = closure.copy_until_full(); // copies from closure.source()
2906 
2907       decrement_destination_counts(cm, src_space_id, src_region_idx,
2908                                    closure.source());
2909       region_ptr->set_completed();
2910       return;
2911     }
2912 
2913     if (status == ParMarkBitMap::full) {
2914       decrement_destination_counts(cm, src_space_id, src_region_idx,
2915                                    closure.source());
2916       region_ptr->set_deferred_obj_addr(NULL);
2917       region_ptr->set_completed();
2918       return;
2919     }
2920 
2921     decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2922 
2923     // Move to the next source region, possibly switching spaces as well.  All
2924     // args except end_addr may be modified.
2925     src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2926                                      end_addr);
2927   } while (true);
2928 }
2929 
2930 void PSParallelCompact::fill_blocks(size_t region_idx)
2931 {
2932   // Fill in the block table elements for the specified region.  Each block
2933   // table element holds the number of live words in the region that are to the
2934   // left of the first object that starts in the block.  Thus only blocks in
2935   // which an object starts need to be filled.
2936   //
2937   // The algorithm scans the section of the bitmap that corresponds to the
2938   // region, keeping a running total of the live words.  When an object start is
2939   // found, if it's the first to start in the block that contains it, the
2940   // current total is written to the block table element.
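       //
       // A worked example (a 128-word block size is assumed here purely for
       // illustration): suppose a partial object covers the first 96 words of
       // the region, a 150-word object starts at word 100, and the next object
       // starts at word 300.  The entry for block 0 (words 0-127) is set to 96
       // (the partial object lies to the left of the object at word 100); the
       // entry for block 2 (words 256-383) is set to 96 + 150 = 246.  Block 1
       // is not filled, since no object starts in it.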
2941   const size_t Log2BlockSize = ParallelCompactData::Log2BlockSize;
2942   const size_t Log2RegionSize = ParallelCompactData::Log2RegionSize;
2943   const size_t RegionSize = ParallelCompactData::RegionSize;
2944 
2945   ParallelCompactData& sd = summary_data();
2946   const size_t partial_obj_size = sd.region(region_idx)->partial_obj_size();
2947   if (partial_obj_size >= RegionSize) {
2948     return; // No objects start in this region.
2949   }
2950 
2951   // Ensure the first loop iteration decides that the block has changed.
2952   size_t cur_block = sd.block_count();
2953 
2954   const ParMarkBitMap* const bitmap = mark_bitmap();
2955 
2956   const size_t Log2BitsPerBlock = Log2BlockSize - LogMinObjAlignment;
2957   assert((size_t)1 << Log2BitsPerBlock ==
2958          bitmap->words_to_bits(ParallelCompactData::BlockSize), "sanity");
2959 
2960   size_t beg_bit = bitmap->words_to_bits(region_idx << Log2RegionSize);
2961   const size_t range_end = beg_bit + bitmap->words_to_bits(RegionSize);
2962   size_t live_bits = bitmap->words_to_bits(partial_obj_size);
2963   beg_bit = bitmap->find_obj_beg(beg_bit + live_bits, range_end);
2964   while (beg_bit < range_end) {
2965     const size_t new_block = beg_bit >> Log2BitsPerBlock;
2966     if (new_block != cur_block) {
2967       cur_block = new_block;
2968       sd.block(cur_block)->set_offset(bitmap->bits_to_words(live_bits));
2969     }
2970 
2971     const size_t end_bit = bitmap->find_obj_end(beg_bit, range_end);
2972     if (end_bit < range_end - 1) {
2973       live_bits += end_bit - beg_bit + 1;
2974       beg_bit = bitmap->find_obj_beg(end_bit + 1, range_end);
2975     } else {
2976       return;
2977     }
2978   }
2979 }
2980 
2981 void
2982 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
2983   const MutableSpace* sp = space(space_id);
2984   if (sp->is_empty()) {
2985     return;
2986   }
2987 
2988   ParallelCompactData& sd = PSParallelCompact::summary_data();
2989   ParMarkBitMap* const bitmap = mark_bitmap();
2990   HeapWord* const dp_addr = dense_prefix(space_id);
2991   HeapWord* beg_addr = sp->bottom();
2992   HeapWord* end_addr = sp->top();
2993 
2994   assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
2995 
2996   const size_t beg_region = sd.addr_to_region_idx(beg_addr);
2997   const size_t dp_region = sd.addr_to_region_idx(dp_addr);
2998   if (beg_region < dp_region) {
2999     update_and_deadwood_in_dense_prefix(cm, space_id, beg_region, dp_region);
3000   }
3001 
3002   // The destination of the first live object that starts in the region is one
3003   // past the end of the partial object entering the region (if any).
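       // (Illustrative: if such a partial object extends 16 words onto
       // dp_region, dest_addr is the region's start address plus 16 words.)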
3004   HeapWord* const dest_addr = sd.partial_obj_end(dp_region);
3005   HeapWord* const new_top = _space_info[space_id].new_top();
3006   assert(new_top >= dest_addr, "bad new_top value");
3007   const size_t words = pointer_delta(new_top, dest_addr);
3008 
3009   if (words > 0) {
3010     ObjectStartArray* start_array = _space_info[space_id].start_array();
3011     MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
3012 
3013     ParMarkBitMap::IterationStatus status;
3014     status = bitmap->iterate(&closure, dest_addr, end_addr);
3015     assert(status == ParMarkBitMap::full, "iteration not complete");
3016     assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
3017            "live objects skipped because closure is full");
3018   }
3019 }
3020 
3021 jlong PSParallelCompact::millis_since_last_gc() {
3022   // We need a monotonically non-decreasing time in ms; os::javaTimeMillis()
3023   // does not guarantee monotonicity, so use os::javaTimeNanos() instead.
3024   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
3025   jlong ret_val = now - _time_of_last_gc;
3026   // XXX See note in genCollectedHeap::millis_since_last_gc().
3027   if (ret_val < 0) {
3028     NOT_PRODUCT(warning("time warp: " JLONG_FORMAT, ret_val);)
3029     return 0;
3030   }
3031   return ret_val;
3032 }
3033 
3034 void PSParallelCompact::reset_millis_since_last_gc() {
3035   // We need a monotonically non-decreasing time in ms; os::javaTimeMillis()
3036   // does not guarantee monotonicity, so use os::javaTimeNanos() instead.
3037   _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
3038 }
3039 
3040 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
3041 {
3042   if (source() != destination()) {
3043     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3044     Copy::aligned_conjoint_words(source(), destination(), words_remaining());
3045   }
3046   update_state(words_remaining());
3047   assert(is_full(), "sanity");
3048   return ParMarkBitMap::full;
3049 }
3050 
3051 void MoveAndUpdateClosure::copy_partial_obj()
3052 {
3053   size_t words = words_remaining();
3054 
3055   HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
3056   HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
3057   if (end_addr < range_end) {
3058     words = bitmap()->obj_size(source(), end_addr);
3059   }
3060 
3061   // This test is necessary; if omitted, the pointer updates to a partial object
3062   // that crosses the dense prefix boundary could be overwritten.
3063   if (source() != destination()) {
3064     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3065     Copy::aligned_conjoint_words(source(), destination(), words);
3066   }
3067   update_state(words);
3068 }
3069 
3070 void InstanceKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3071   PSParallelCompact::AdjustPointerClosure closure(cm);
3072   oop_oop_iterate_oop_maps<true>(obj, &closure);
3073 }
3074 
3075 void InstanceMirrorKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3076   InstanceKlass::oop_pc_update_pointers(obj, cm);
3077 
3078   PSParallelCompact::AdjustPointerClosure closure(cm);
3079   oop_oop_iterate_statics<true>(obj, &closure);
3080 }
3081 
3082 void InstanceClassLoaderKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3083   InstanceKlass::oop_pc_update_pointers(obj, cm);
3084 }
3085 
3086 #ifdef ASSERT
3087 template <class T> static void trace_reference_gc(const char *s, oop obj,
3088                                                   T* referent_addr,
3089                                                   T* next_addr,
3090                                                   T* discovered_addr) {
3091   log_develop_trace(gc, ref)("%s obj " PTR_FORMAT, s, p2i(obj));
3092   log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
3093                              p2i(referent_addr), referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
3094   log_develop_trace(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
3095                              p2i(next_addr), next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
3096   log_develop_trace(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
3097                              p2i(discovered_addr), discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
3098 }
3099 #endif
3100 
3101 template <class T>
3102 static void oop_pc_update_pointers_specialized(oop obj, ParCompactionManager* cm) {
3103   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
3104   PSParallelCompact::adjust_pointer(referent_addr, cm);
3105   T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
3106   PSParallelCompact::adjust_pointer(next_addr, cm);
3107   T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
3108   PSParallelCompact::adjust_pointer(discovered_addr, cm);
3109   debug_only(trace_reference_gc("InstanceRefKlass::oop_update_ptrs", obj,
3110                                 referent_addr, next_addr, discovered_addr);)
3111 }
3112 
3113 void InstanceRefKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3114   InstanceKlass::oop_pc_update_pointers(obj, cm);
3115 
3116   if (UseCompressedOops) {
3117     oop_pc_update_pointers_specialized<narrowOop>(obj, cm);
3118   } else {
3119     oop_pc_update_pointers_specialized<oop>(obj, cm);
3120   }
3121 }
3122 
3123 void ObjArrayKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3124   assert(obj->is_objArray(), "obj must be obj array");
3125   PSParallelCompact::AdjustPointerClosure closure(cm);
3126   oop_oop_iterate_elements<true>(objArrayOop(obj), &closure);
3127 }
3128 
3129 void TypeArrayKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3130   assert(obj->is_typeArray(), "must be a type array");
3131 }
3132 
3133 void ValueArrayKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3134   assert(obj->is_valueArray(), "must be a value array");
3135   if (contains_oops()) {
3136     Unimplemented();
3137 #if 0
3138     PSParallelCompact::AdjustPointerClosure closure(cm);
3139     oop_oop_iterate_elements<true>(valueArrayOop(obj), &closure);
3140 #endif
3141   }
3142 }
3143 
3144 ParMarkBitMapClosure::IterationStatus
3145 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3146   assert(destination() != NULL, "sanity");
3147   assert(bitmap()->obj_size(addr) == words, "bad size");
3148 
3149   _source = addr;
3150   assert(PSParallelCompact::summary_data().calc_new_pointer(source(), compaction_manager()) ==
3151          destination(), "wrong destination");
3152 
3153   if (words > words_remaining()) {
3154     return ParMarkBitMap::would_overflow;
3155   }
3156 
3157   // The start_array must be updated even if the object is not moving.
3158   if (_start_array != NULL) {
3159     _start_array->allocate_block(destination());
3160   }
3161 
3162   if (destination() != source()) {
3163     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3164     Copy::aligned_conjoint_words(source(), destination(), words);
3165   }
3166 
3167   oop moved_oop = (oop) destination();
3168   compaction_manager()->update_contents(moved_oop);
3169   assert(moved_oop->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop));
3170 
3171   update_state(words);
3172   assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
3173   return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
3174 }
3175 
3176 UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
3177                                      ParCompactionManager* cm,
3178                                      PSParallelCompact::SpaceId space_id) :
3179   ParMarkBitMapClosure(mbm, cm),
3180   _space_id(space_id),
3181   _start_array(PSParallelCompact::start_array(space_id))
3182 {
3183 }
3184 
3185 // Updates the references in the object to their new values.
3186 ParMarkBitMapClosure::IterationStatus
3187 UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
3188   do_addr(addr);
3189   return ParMarkBitMap::incomplete;
3190 }
3191 
3192 ParMarkBitMapClosure::IterationStatus
3193 FillClosure::do_addr(HeapWord* addr, size_t size) {
3194   CollectedHeap::fill_with_objects(addr, size);
3195   HeapWord* const end = addr + size;
3196   do {
3197     _start_array->allocate_block(addr);
3198     addr += oop(addr)->size();
3199   } while (addr < end);
3200   return ParMarkBitMap::incomplete;
3201 }