/*
 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/pcTasks.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

#include <math.h>

// All sizes are in HeapWords.
const size_t ParallelCompactData::Log2RegionSize  = 16; // 64K words
const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
const size_t ParallelCompactData::RegionSizeBytes =
  RegionSize << LogHeapWordSize;
const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
const size_t ParallelCompactData::RegionAddrMask       = ~RegionAddrOffsetMask;

const size_t ParallelCompactData::Log2BlockSize   = 7; // 128 words
const size_t ParallelCompactData::BlockSize       = (size_t)1 << Log2BlockSize;
const size_t ParallelCompactData::BlockSizeBytes  =
  BlockSize << LogHeapWordSize;
const size_t ParallelCompactData::BlockSizeOffsetMask = BlockSize - 1;
const size_t ParallelCompactData::BlockAddrOffsetMask = BlockSizeBytes - 1;
const size_t ParallelCompactData::BlockAddrMask       = ~BlockAddrOffsetMask;

const size_t ParallelCompactData::BlocksPerRegion = RegionSize / BlockSize;
const size_t ParallelCompactData::Log2BlocksPerRegion =
  Log2RegionSize - Log2BlockSize;
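
// For illustration: assuming a 64-bit VM with 8-byte HeapWords
// (LogHeapWordSize == 3), the constants above work out to
//   RegionSize      = 2^16 words (64K words), RegionSizeBytes = 512K bytes
//   BlockSize       = 2^7  words (128 words), BlockSizeBytes  = 1K bytes
//   BlocksPerRegion = 2^16 / 2^7 = 512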

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_shift = 27;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::los_mask = ~dc_mask;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;

const ParallelCompactData::RegionData::region_sz_t
ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
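
// The dc_* constants pack the destination count and claim state into the high
// bits of the 32-bit region_sz_t, leaving the low bits for the live-obj size.
// Working out the values (dc_shift == 27):
//   dc_mask      = 0xf8000000  (bits 31..27)
//   dc_one       = 0x08000000  (one destination region)
//   dc_claimed   = 0x40000000  (region claimed for filling)
//   dc_completed = 0x60000000  (region completely filled)
//   los_mask     = 0x07ffffff  (live-obj size, low 27 bits)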

SpaceInfo PSParallelCompact::_space_info[PSParallelCompact::last_space_id];

ReferenceProcessor* PSParallelCompact::_ref_processor = NULL;

double PSParallelCompact::_dwl_mean;
double PSParallelCompact::_dwl_std_dev;
double PSParallelCompact::_dwl_first_term;
double PSParallelCompact::_dwl_adjustment;
#ifdef  ASSERT
bool   PSParallelCompact::_dwl_initialized = false;
#endif  // #ifdef ASSERT

void SplitInfo::record(size_t src_region_idx, size_t partial_obj_size,
                       HeapWord* destination)
{
  assert(src_region_idx != 0, "invalid src_region_idx");
  assert(partial_obj_size != 0, "invalid partial_obj_size argument");
  assert(destination != NULL, "invalid destination argument");

  _src_region_idx = src_region_idx;
  _partial_obj_size = partial_obj_size;
  _destination = destination;

  // These fields may not be updated below, so make sure they're clear.
  assert(_dest_region_addr == NULL, "should have been cleared");
  assert(_first_src_addr == NULL, "should have been cleared");

  // Determine the number of destination regions for the partial object.
  HeapWord* const last_word = destination + partial_obj_size - 1;
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  HeapWord* const beg_region_addr = sd.region_align_down(destination);
  HeapWord* const end_region_addr = sd.region_align_down(last_word);

  if (beg_region_addr == end_region_addr) {
    // One destination region.
    _destination_count = 1;
    if (end_region_addr == destination) {
      // The destination falls on a region boundary, thus the first word of the
      // partial object will be the first word copied to the destination region.
      _dest_region_addr = end_region_addr;
      _first_src_addr = sd.region_to_addr(src_region_idx);
    }
  } else {
    // Two destination regions.  When copied, the partial object will cross a
    // destination region boundary, so a word somewhere within the partial
    // object will be the first word copied to the second destination region.
    _destination_count = 2;
    _dest_region_addr = end_region_addr;
    const size_t ofs = pointer_delta(end_region_addr, destination);
    assert(ofs < _partial_obj_size, "sanity");
    _first_src_addr = sd.region_to_addr(src_region_idx) + ofs;
  }
}
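
// Worked example for SplitInfo::record(), with made-up numbers: assume
// RegionSize == 0x10000 words and a partial object of 0x12000 words whose
// destination starts 0x8000 words past a destination region boundary.  The
// object then ends 0xa000 words into the next region, so _destination_count
// is 2, _dest_region_addr is that next region boundary, and _first_src_addr
// is 0x8000 words into the source region -- the first source word that is
// copied to the second destination region.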

void SplitInfo::clear()
{
  _src_region_idx = 0;
  _partial_obj_size = 0;
  _destination = NULL;
  _destination_count = 0;
  _dest_region_addr = NULL;
  _first_src_addr = NULL;
  assert(!is_valid(), "sanity");
}

#ifdef  ASSERT
void SplitInfo::verify_clear()
{
  assert(_src_region_idx == 0, "not clear");
  assert(_partial_obj_size == 0, "not clear");
  assert(_destination == NULL, "not clear");
  assert(_destination_count == 0, "not clear");
  assert(_dest_region_addr == NULL, "not clear");
  assert(_first_src_addr == NULL, "not clear");
}
#endif  // #ifdef ASSERT


void PSParallelCompact::print_on_error(outputStream* st) {
  _mark_bitmap.print_on_error(st);
}

#ifndef PRODUCT
const char* PSParallelCompact::space_names[] = {
  "old ", "eden", "from", "to  "
};

void PSParallelCompact::print_region_ranges() {
  if (!log_develop_is_enabled(Trace, gc, compaction)) {
    return;
  }
  Log(gc, compaction) log;
  ResourceMark rm;
  Universe::print_on(log.trace_stream());
  log.trace("space  bottom     top        end        new_top");
  log.trace("------ ---------- ---------- ---------- ----------");

  for (unsigned int id = 0; id < last_space_id; ++id) {
    const MutableSpace* space = _space_info[id].space();
    log.trace("%u %s "
              SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
              SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
              id, space_names[id],
              summary_data().addr_to_region_idx(space->bottom()),
              summary_data().addr_to_region_idx(space->top()),
              summary_data().addr_to_region_idx(space->end()),
              summary_data().addr_to_region_idx(_space_info[id].new_top()));
  }
}

void
print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
{
#define REGION_IDX_FORMAT        SIZE_FORMAT_W(7)
#define REGION_DATA_FORMAT       SIZE_FORMAT_W(5)

  ParallelCompactData& sd = PSParallelCompact::summary_data();
  size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
  log_develop_trace(gc, compaction)(
      REGION_IDX_FORMAT " " PTR_FORMAT " "
      REGION_IDX_FORMAT " " PTR_FORMAT " "
      REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
      REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
      i, p2i(c->data_location()), dci, p2i(c->destination()),
      c->partial_obj_size(), c->live_obj_size(),
      c->data_size(), c->source_region(), c->destination_count());

#undef  REGION_IDX_FORMAT
#undef  REGION_DATA_FORMAT
}

void
print_generic_summary_data(ParallelCompactData& summary_data,
                           HeapWord* const beg_addr,
                           HeapWord* const end_addr)
{
  size_t total_words = 0;
  size_t i = summary_data.addr_to_region_idx(beg_addr);
  const size_t last = summary_data.addr_to_region_idx(end_addr);
  HeapWord* pdest = 0;

  while (i <= last) {
    ParallelCompactData::RegionData* c = summary_data.region(i);
    if (c->data_size() != 0 || c->destination() != pdest) {
      print_generic_summary_region(i, c);
      total_words += c->data_size();
      pdest = c->destination();
    }
    ++i;
  }

  log_develop_trace(gc, compaction)("summary_data_bytes=" SIZE_FORMAT, total_words * HeapWordSize);
}

void
print_generic_summary_data(ParallelCompactData& summary_data,
                           SpaceInfo* space_info)
{
  if (!log_develop_is_enabled(Trace, gc, compaction)) {
    return;
  }

  for (unsigned int id = 0; id < PSParallelCompact::last_space_id; ++id) {
    const MutableSpace* space = space_info[id].space();
    print_generic_summary_data(summary_data, space->bottom(),
                               MAX2(space->top(), space_info[id].new_top()));
  }
}

void
print_initial_summary_data(ParallelCompactData& summary_data,
                           const MutableSpace* space) {
  if (space->top() == space->bottom()) {
    return;
  }

  const size_t region_size = ParallelCompactData::RegionSize;
  typedef ParallelCompactData::RegionData RegionData;
  HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
  const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
  const RegionData* c = summary_data.region(end_region - 1);
  HeapWord* end_addr = c->destination() + c->data_size();
  const size_t live_in_space = pointer_delta(end_addr, space->bottom());

  // Print (and count) the full regions at the beginning of the space.
  size_t full_region_count = 0;
  size_t i = summary_data.addr_to_region_idx(space->bottom());
  while (i < end_region && summary_data.region(i)->data_size() == region_size) {
    ParallelCompactData::RegionData* c = summary_data.region(i);
    log_develop_trace(gc, compaction)(
        SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
        i, p2i(c->destination()),
        c->partial_obj_size(), c->live_obj_size(),
        c->data_size(), c->source_region(), c->destination_count());
    ++full_region_count;
    ++i;
  }

  size_t live_to_right = live_in_space - full_region_count * region_size;

  double max_reclaimed_ratio = 0.0;
  size_t max_reclaimed_ratio_region = 0;
  size_t max_dead_to_right = 0;
  size_t max_live_to_right = 0;

  // Print the 'reclaimed ratio' for regions while there is something live in
  // the region or to the right of it.  The remaining regions are empty (and
  // uninteresting), and computing the ratio will result in division by 0.
  while (i < end_region && live_to_right > 0) {
    c = summary_data.region(i);
    HeapWord* const region_addr = summary_data.region_to_addr(i);
    const size_t used_to_right = pointer_delta(space->top(), region_addr);
    const size_t dead_to_right = used_to_right - live_to_right;
    const double reclaimed_ratio = double(dead_to_right) / live_to_right;

    if (reclaimed_ratio > max_reclaimed_ratio) {
      max_reclaimed_ratio = reclaimed_ratio;
      max_reclaimed_ratio_region = i;
      max_dead_to_right = dead_to_right;
      max_live_to_right = live_to_right;
    }

    log_develop_trace(gc, compaction)(
        SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d "
        "%12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
        i, p2i(c->destination()),
        c->partial_obj_size(), c->live_obj_size(),
        c->data_size(), c->source_region(), c->destination_count(),
        reclaimed_ratio, dead_to_right, live_to_right);

    live_to_right -= c->data_size();
    ++i;
  }

  // Any remaining regions are empty.  Print one more if there is one.
  if (i < end_region) {
    ParallelCompactData::RegionData* c = summary_data.region(i);
    log_develop_trace(gc, compaction)(
        SIZE_FORMAT_W(5) " " PTR_FORMAT " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
        i, p2i(c->destination()),
        c->partial_obj_size(), c->live_obj_size(),
        c->data_size(), c->source_region(), c->destination_count());
  }

  log_develop_trace(gc, compaction)("max:  " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
                                    max_reclaimed_ratio_region, max_dead_to_right, max_live_to_right, max_reclaimed_ratio);
}

void
print_initial_summary_data(ParallelCompactData& summary_data,
                           SpaceInfo* space_info) {
  if (!log_develop_is_enabled(Trace, gc, compaction)) {
    return;
  }

  unsigned int id = PSParallelCompact::old_space_id;
  const MutableSpace* space;
  do {
    space = space_info[id].space();
    print_initial_summary_data(summary_data, space);
  } while (++id < PSParallelCompact::eden_space_id);

  do {
    space = space_info[id].space();
    print_generic_summary_data(summary_data, space->bottom(), space->top());
  } while (++id < PSParallelCompact::last_space_id);
}
#endif  // #ifndef PRODUCT

#ifdef  ASSERT
size_t add_obj_count;
size_t add_obj_size;
size_t mark_bitmap_count;
size_t mark_bitmap_size;
#endif  // #ifdef ASSERT

ParallelCompactData::ParallelCompactData()
{
  _region_start = 0;

  _region_vspace = 0;
  _reserved_byte_size = 0;
  _region_data = 0;
  _region_count = 0;

  _block_vspace = 0;
  _block_data = 0;
  _block_count = 0;
}

bool ParallelCompactData::initialize(MemRegion covered_region)
{
  _region_start = covered_region.start();
  const size_t region_size = covered_region.word_size();
  DEBUG_ONLY(_region_end = _region_start + region_size;)

  assert(region_align_down(_region_start) == _region_start,
         "region start not aligned");
  assert((region_size & RegionSizeOffsetMask) == 0,
         "region size not a multiple of RegionSize");

  bool result = initialize_region_data(region_size) && initialize_block_data();
  return result;
}

PSVirtualSpace*
ParallelCompactData::create_vspace(size_t count, size_t element_size)
{
  const size_t raw_bytes = count * element_size;
  const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));

  const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_sz, granularity);
  ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
  os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, page_sz, rs.base(),
                       rs.size());

  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

  PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
  if (vspace != 0) {
    if (vspace->expand_by(_reserved_byte_size)) {
      return vspace;
    }
    delete vspace;
    // Release memory reserved in the space.
    rs.release();
  }

  return 0;
}

bool ParallelCompactData::initialize_region_data(size_t region_size)
{
  const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
  _region_vspace = create_vspace(count, sizeof(RegionData));
  if (_region_vspace != 0) {
    _region_data = (RegionData*)_region_vspace->reserved_low_addr();
    _region_count = count;
    return true;
  }
  return false;
}

bool ParallelCompactData::initialize_block_data()
{
  assert(_region_count != 0, "region data must be initialized first");
  const size_t count = _region_count << Log2BlocksPerRegion;
  _block_vspace = create_vspace(count, sizeof(BlockData));
  if (_block_vspace != 0) {
    _block_data = (BlockData*)_block_vspace->reserved_low_addr();
    _block_count = count;
    return true;
  }
  return false;
}

void ParallelCompactData::clear()
{
  memset(_region_data, 0, _region_vspace->committed_size());
  memset(_block_data, 0, _block_vspace->committed_size());
}

void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
  assert(beg_region <= _region_count, "beg_region out of range");
  assert(end_region <= _region_count, "end_region out of range");
  assert(RegionSize % BlockSize == 0, "RegionSize not a multiple of BlockSize");

  const size_t region_cnt = end_region - beg_region;
  memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));

  const size_t beg_block = beg_region * BlocksPerRegion;
  const size_t block_cnt = region_cnt * BlocksPerRegion;
  memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
}
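
// For example (illustrative only): with BlocksPerRegion == 512,
// clear_range(2, 5) zeroes RegionData entries [2, 5) and the corresponding
// BlockData entries [1024, 2560), i.e. the blocks covering those regions.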

HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
{
  const RegionData* cur_cp = region(region_idx);
  const RegionData* const end_cp = region(region_count() - 1);

  HeapWord* result = region_to_addr(region_idx);
  if (cur_cp < end_cp) {
    do {
      result += cur_cp->partial_obj_size();
    } while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp);
  }
  return result;
}

void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
{
  const size_t obj_ofs = pointer_delta(addr, _region_start);
  const size_t beg_region = obj_ofs >> Log2RegionSize;
  const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;

  DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
  DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)

  if (beg_region == end_region) {
    // All in one region.
    _region_data[beg_region].add_live_obj(len);
    return;
  }

  // First region.
  const size_t beg_ofs = region_offset(addr);
  _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);

  // Middle regions--completely spanned by this object.
  for (size_t region = beg_region + 1; region < end_region; ++region) {
    _region_data[region].set_partial_obj_size(RegionSize);
    _region_data[region].set_partial_obj_addr(addr);
  }

  // Last region.
  const size_t end_ofs = region_offset(addr + len - 1);
  _region_data[end_region].set_partial_obj_size(end_ofs + 1);
  _region_data[end_region].set_partial_obj_addr(addr);
}
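
// Worked example for add_obj(), with made-up numbers: an object of
// 2.25 * RegionSize words starting halfway into region R adds RegionSize/2
// live words to R (via add_live_obj), marks region R+1 as completely spanned
// (partial_obj_size == RegionSize), and records 0.75 * RegionSize words of
// partial object (end offset + 1) in region R+2; the spanned regions'
// partial_obj_addr fields all point back at the object's start.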

void
ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
{
  assert(region_offset(beg) == 0, "not RegionSize aligned");
  assert(region_offset(end) == 0, "not RegionSize aligned");

  size_t cur_region = addr_to_region_idx(beg);
  const size_t end_region = addr_to_region_idx(end);
  HeapWord* addr = beg;
  while (cur_region < end_region) {
    _region_data[cur_region].set_destination(addr);
    _region_data[cur_region].set_destination_count(0);
    _region_data[cur_region].set_source_region(cur_region);
    _region_data[cur_region].set_data_location(addr);

    // Update live_obj_size so the region appears completely full.
    size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
    _region_data[cur_region].set_live_obj_size(live_size);

    ++cur_region;
    addr += RegionSize;
  }
}

// Find the point at which a space can be split and, if necessary, record the
// split point.
//
// If the current src region (which overflowed the destination space) doesn't
// have a partial object, the split point is at the beginning of the current src
// region (an "easy" split, no extra bookkeeping required).
//
// If the current src region has a partial object, the split point is in the
// region where that partial object starts (call it the split_region).  If
// split_region has a partial object, then the split point is just after that
// partial object (a "hard" split where we have to record the split data and
// zero the partial_obj_size field).  With a "hard" split, we know that the
// partial_obj ends within split_region because the partial object that caused
// the overflow starts in split_region.  If split_region doesn't have a partial
// obj, then the split is at the beginning of split_region (another "easy"
// split).
HeapWord*
ParallelCompactData::summarize_split_space(size_t src_region,
                                           SplitInfo& split_info,
                                           HeapWord* destination,
                                           HeapWord* target_end,
                                           HeapWord** target_next)
{
  assert(destination <= target_end, "sanity");
  assert(destination + _region_data[src_region].data_size() > target_end,
         "region should not fit into target space");
  assert(is_region_aligned(target_end), "sanity");

  size_t split_region = src_region;
  HeapWord* split_destination = destination;
  size_t partial_obj_size = _region_data[src_region].partial_obj_size();

  if (destination + partial_obj_size > target_end) {
    // The split point is just after the partial object (if any) in the
    // src_region that contains the start of the object that overflowed the
    // destination space.
    //
    // Find the start of the "overflow" object and set split_region to the
    // region containing it.
    HeapWord* const overflow_obj = _region_data[src_region].partial_obj_addr();
    split_region = addr_to_region_idx(overflow_obj);

    // Clear the source_region field of all destination regions whose first word
    // came from data after the split point (a non-null source_region field
    // implies a region must be filled).
    //
    // An alternative to the simple loop below:  clear during post_compact(),
    // which uses memcpy instead of individual stores, and is easy to
    // parallelize.  (The downside is that it clears the entire RegionData
    // object as opposed to just one field.)
    //
    // post_compact() would have to clear the summary data up to the highest
    // address that was written during the summary phase, which would be
    //
    //         max(top, max(new_top, clear_top))
    //
    // where clear_top is a new field in SpaceInfo.  Would have to set clear_top
    // to target_end.
    const RegionData* const sr = region(split_region);
    const size_t beg_idx =
      addr_to_region_idx(region_align_up(sr->destination() +
                                         sr->partial_obj_size()));
    const size_t end_idx = addr_to_region_idx(target_end);

    log_develop_trace(gc, compaction)("split:  clearing source_region field in [" SIZE_FORMAT ", " SIZE_FORMAT ")", beg_idx, end_idx);
    for (size_t idx = beg_idx; idx < end_idx; ++idx) {
      _region_data[idx].set_source_region(0);
    }

    // Set split_destination and partial_obj_size to reflect the split region.
    split_destination = sr->destination();
    partial_obj_size = sr->partial_obj_size();
  }

  // The split is recorded only if a partial object extends onto the region.
  if (partial_obj_size != 0) {
    _region_data[split_region].set_partial_obj_size(0);
    split_info.record(split_region, partial_obj_size, split_destination);
  }

  // Setup the continuation addresses.
  *target_next = split_destination + partial_obj_size;
  HeapWord* const source_next = region_to_addr(split_region) + partial_obj_size;

  if (log_develop_is_enabled(Trace, gc, compaction)) {
    const char* split_type = partial_obj_size == 0 ? "easy" : "hard";
    log_develop_trace(gc, compaction)("%s split:  src=" PTR_FORMAT " src_c=" SIZE_FORMAT " pos=" SIZE_FORMAT,
                                      split_type, p2i(source_next), split_region, partial_obj_size);
    log_develop_trace(gc, compaction)("%s split:  dst=" PTR_FORMAT " dst_c=" SIZE_FORMAT " tn=" PTR_FORMAT,
                                      split_type, p2i(split_destination),
                                      addr_to_region_idx(split_destination),
                                      p2i(*target_next));

    if (partial_obj_size != 0) {
      HeapWord* const po_beg = split_info.destination();
      HeapWord* const po_end = po_beg + split_info.partial_obj_size();
      log_develop_trace(gc, compaction)("%s split:  po_beg=" PTR_FORMAT " " SIZE_FORMAT " po_end=" PTR_FORMAT " " SIZE_FORMAT,
                                        split_type,
                                        p2i(po_beg), addr_to_region_idx(po_beg),
                                        p2i(po_end), addr_to_region_idx(po_end));
    }
  }

  return source_next;
}

bool ParallelCompactData::summarize(SplitInfo& split_info,
                                    HeapWord* source_beg, HeapWord* source_end,
                                    HeapWord** source_next,
                                    HeapWord* target_beg, HeapWord* target_end,
                                    HeapWord** target_next)
{
  HeapWord* const source_next_val = source_next == NULL ? NULL : *source_next;
  log_develop_trace(gc, compaction)(
      "sb=" PTR_FORMAT " se=" PTR_FORMAT " sn=" PTR_FORMAT " "
      "tb=" PTR_FORMAT " te=" PTR_FORMAT " tn=" PTR_FORMAT,
      p2i(source_beg), p2i(source_end), p2i(source_next_val),
      p2i(target_beg), p2i(target_end), p2i(*target_next));

  size_t cur_region = addr_to_region_idx(source_beg);
  const size_t end_region = addr_to_region_idx(region_align_up(source_end));

  HeapWord* dest_addr = target_beg;
  while (cur_region < end_region) {
    // The destination must be set even if the region has no data.
    _region_data[cur_region].set_destination(dest_addr);

    size_t words = _region_data[cur_region].data_size();
    if (words > 0) {
      // If cur_region does not fit entirely into the target space, find a point
      // at which the source space can be 'split' so that part is copied to the
      // target space and the rest is copied elsewhere.
      if (dest_addr + words > target_end) {
        assert(source_next != NULL, "source_next is NULL when splitting");
        *source_next = summarize_split_space(cur_region, split_info, dest_addr,
                                             target_end, target_next);
        return false;
      }

      // Compute the destination_count for cur_region, and if necessary, update
      // source_region for a destination region.  The source_region field is
      // updated if cur_region is the first (left-most) region to be copied to a
      // destination region.
      //
      // The destination_count calculation is a bit subtle.  A region that has
      // data that compacts into itself does not count itself as a destination.
      // This maintains the invariant that a zero count means the region is
      // available and can be claimed and then filled.
      uint destination_count = 0;
      if (split_info.is_split(cur_region)) {
        // The current region has been split:  the partial object will be copied
        // to one destination space and the remaining data will be copied to
        // another destination space.  Adjust the initial destination_count and,
        // if necessary, set the source_region field if the partial object will
        // cross a destination region boundary.
        destination_count = split_info.destination_count();
        if (destination_count == 2) {
          size_t dest_idx = addr_to_region_idx(split_info.dest_region_addr());
          _region_data[dest_idx].set_source_region(cur_region);
        }
      }

      HeapWord* const last_addr = dest_addr + words - 1;
      const size_t dest_region_1 = addr_to_region_idx(dest_addr);
      const size_t dest_region_2 = addr_to_region_idx(last_addr);

      // Initially assume that the destination regions will be the same and
      // adjust the value below if necessary.  Under this assumption, if
      // cur_region == dest_region_2, then cur_region will be compacted
      // completely into itself.
      destination_count += cur_region == dest_region_2 ? 0 : 1;
      if (dest_region_1 != dest_region_2) {
        // Destination regions differ; adjust destination_count.
        destination_count += 1;
        // Data from cur_region will be copied to the start of dest_region_2.
        _region_data[dest_region_2].set_source_region(cur_region);
      } else if (region_offset(dest_addr) == 0) {
        // Data from cur_region will be copied to the start of the destination
        // region.
        _region_data[dest_region_1].set_source_region(cur_region);
      }

      _region_data[cur_region].set_destination_count(destination_count);
      _region_data[cur_region].set_data_location(region_to_addr(cur_region));
      dest_addr += words;
    }

    ++cur_region;
  }

  *target_next = dest_addr;
  return true;
}
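
// Usage sketch for summarize() (a reading of the code above, not a separate
// contract): a true result means every source region's data fit below
// target_end and *target_next holds the first unused destination word; a
// false result means the target space overflowed, split_info records any
// partial object straddling the split, and *source_next is where
// summarization of the remaining source regions should resume against the
// next target space.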

HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) {
  assert(addr != NULL, "Should detect NULL oop earlier");
  assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
  assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");

  // Region covering the object.
  RegionData* const region_ptr = addr_to_region_ptr(addr);
  HeapWord* result = region_ptr->destination();

  // If the entire Region is live, the new location is region->destination + the
  // offset of the object within the Region.

  // Run some performance tests to determine if this special case pays off.  It
  // is worth it for pointers into the dense prefix.  If the optimization to
  // avoid pointer updates in regions that only point to the dense prefix is
  // ever implemented, this should be revisited.
  if (region_ptr->data_size() == RegionSize) {
    result += region_offset(addr);
    return result;
  }

  // Otherwise, the new location is region->destination + block offset + the
  // number of live words in the Block that are (a) to the left of addr and (b)
  // due to objects that start in the Block.

  // Fill in the block table if necessary.  This is unsynchronized, so multiple
  // threads may fill the block table for a region (harmless, since it is
  // idempotent).
  if (!region_ptr->blocks_filled()) {
    PSParallelCompact::fill_blocks(addr_to_region_idx(addr));
    region_ptr->set_blocks_filled();
  }

  HeapWord* const search_start = block_align_down(addr);
  const size_t block_offset = addr_to_block_ptr(addr)->offset();

  const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
  const size_t live = bitmap->live_words_in_range(cm, search_start, oop(addr));
  result += block_offset + live;
  DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result));
  return result;
}
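
// In other words, for a partially live region the forwarding address computed
// above is
//
//   new_addr = region destination + block offset
//              + live words in the block to the left of addr
//                (counting only objects that start in the block)
//
// with the fully live region case short-circuited to destination + offset.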

#ifdef ASSERT
void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
{
  const size_t* const beg = (const size_t*)vspace->committed_low_addr();
  const size_t* const end = (const size_t*)vspace->committed_high_addr();
  for (const size_t* p = beg; p < end; ++p) {
    assert(*p == 0, "not zero");
  }
}

void ParallelCompactData::verify_clear()
{
  verify_clear(_region_vspace);
  verify_clear(_block_vspace);
}
#endif  // #ifdef ASSERT

STWGCTimer          PSParallelCompact::_gc_timer;
ParallelOldTracer   PSParallelCompact::_gc_tracer;
elapsedTimer        PSParallelCompact::_accumulated_time;
unsigned int        PSParallelCompact::_total_invocations = 0;
unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
jlong               PSParallelCompact::_time_of_last_gc = 0;
CollectorCounters*  PSParallelCompact::_counters = NULL;
ParMarkBitMap       PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;

bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }

void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
  PSParallelCompact::AdjustPointerClosure closure(_cm);
  klass->oops_do(&closure);
}

void PSParallelCompact::post_initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MemRegion mr = heap->reserved_region();
  _ref_processor =
    new ReferenceProcessor(mr,            // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           ParallelGCThreads, // mt processing degree
                           true,              // mt discovery
                           ParallelGCThreads, // mt discovery degree
                           true,              // atomic_discovery
                           &_is_alive_closure); // non-header is alive closure
  _counters = new CollectorCounters("PSParallelCompact", 1);

  // Initialize static fields in ParCompactionManager.
  ParCompactionManager::initialize(mark_bitmap());
}

bool PSParallelCompact::initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MemRegion mr = heap->reserved_region();

  // Was the old gen allocated successfully?
  if (!heap->old_gen()->is_allocated()) {
    return false;
  }

  initialize_space_info();
  initialize_dead_wood_limiter();

  if (!_mark_bitmap.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel "
      "garbage collection for the requested " SIZE_FORMAT "KB heap.",
      _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  if (!_summary_data.initialize(mr)) {
    vm_shutdown_during_initialization(
      err_msg("Unable to allocate " SIZE_FORMAT "KB summary data for parallel "
      "garbage collection for the requested " SIZE_FORMAT "KB heap.",
      _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
    return false;
  }

  return true;
}

void PSParallelCompact::initialize_space_info()
{
  memset(&_space_info, 0, sizeof(_space_info));

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();

  _space_info[old_space_id].set_space(heap->old_gen()->object_space());
  _space_info[eden_space_id].set_space(young_gen->eden_space());
  _space_info[from_space_id].set_space(young_gen->from_space());
  _space_info[to_space_id].set_space(young_gen->to_space());

  _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
}

void PSParallelCompact::initialize_dead_wood_limiter()
{
  const size_t max = 100;
  _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
  _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
  _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
  DEBUG_ONLY(_dwl_initialized = true;)
  _dwl_adjustment = normal_distribution(1.0);
}
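
// A sketch of the underlying math, assuming normal_distribution() (defined in
// the header) evaluates a Gaussian density with mean _dwl_mean and standard
// deviation _dwl_std_dev: _dwl_first_term is the peak coefficient
// 1 / (sqrt(2 * pi) * sigma) of that Gaussian, and _dwl_adjustment caches its
// value at density 1.0 so dead_wood_limiter() below can shift its result to
// exactly min_percent at density 1.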

void
PSParallelCompact::clear_data_covering_space(SpaceId id)
{
  // At this point, top is the value before GC, new_top() is the value that will
  // be set at the end of GC.  The marking bitmap is cleared to top; nothing
  // should be marked above top.  The summary data is cleared to the larger of
  // top & new_top.
  MutableSpace* const space = _space_info[id].space();
  HeapWord* const bot = space->bottom();
  HeapWord* const top = space->top();
  HeapWord* const max_top = MAX2(top, _space_info[id].new_top());

  const idx_t beg_bit = _mark_bitmap.addr_to_bit(bot);
  const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
  _mark_bitmap.clear_range(beg_bit, end_bit);

  const size_t beg_region = _summary_data.addr_to_region_idx(bot);
  const size_t end_region =
    _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
  _summary_data.clear_range(beg_region, end_region);

  // Clear the data used to 'split' regions.
  SplitInfo& split_info = _space_info[id].split_info();
  if (split_info.is_valid()) {
    split_info.clear();
  }
  DEBUG_ONLY(split_info.verify_clear();)
}

void PSParallelCompact::pre_compact()
{
  // Update the from & to space pointers in space_info, since they are swapped
  // at each young gen gc.  Do the update unconditionally (even though a
  // promotion failure does not swap spaces) because the spaces may have been
  // swapped an arbitrary number of times by intervening young gen collections.
  GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _space_info[from_space_id].set_space(heap->young_gen()->from_space());
  _space_info[to_space_id].set_space(heap->young_gen()->to_space());

  DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
  DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)

  // Increment the invocation count
  heap->increment_total_collections(true);

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify("Before GC");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    heap->old_gen()->verify_object_start_array();
  }

  DEBUG_ONLY(mark_bitmap()->verify_clear();)
  DEBUG_ONLY(summary_data().verify_clear();)

  // Have worker threads release resources the next time they run a task.
  gc_task_manager()->release_all_resources();

  ParCompactionManager::reset_all_bitmap_query_caches();
}

void PSParallelCompact::post_compact()
{
  GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);

  for (unsigned int id = old_space_id; id < last_space_id; ++id) {
    // Clear the marking bitmap, summary data and split info.
    clear_data_covering_space(SpaceId(id));
    // Update top().  Must be done after clearing the bitmap and summary data.
    _space_info[id].publish_new_top();
  }

  MutableSpace* const eden_space = _space_info[eden_space_id].space();
  MutableSpace* const from_space = _space_info[from_space_id].space();
  MutableSpace* const to_space   = _space_info[to_space_id].space();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  bool eden_empty = eden_space->is_empty();
  if (!eden_empty) {
    eden_empty = absorb_live_data_from_eden(heap->size_policy(),
                                            heap->young_gen(), heap->old_gen());
  }

  // Update heap occupancy information which is used as input to the soft ref
  // clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  bool young_gen_empty = eden_empty && from_space->is_empty() &&
    to_space->is_empty();

  ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
  MemRegion old_mr = heap->old_gen()->reserved();
  if (young_gen_empty) {
    modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
  } else {
    modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
  }

  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  ClassLoaderDataGraph::purge();
  MetaspaceAux::verify_metrics();

  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif

  ref_processor()->enqueue_discovered_references(NULL);

  if (ZapUnusedHeapArea) {
    heap->gen_mangle_unused_area();
  }

  // Update time of last GC
  reset_millis_since_last_gc();
}

HeapWord*
PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
                                                    bool maximum_compaction)
{
  const size_t region_size = ParallelCompactData::RegionSize;
  const ParallelCompactData& sd = summary_data();

  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const top_aligned_up = sd.region_align_up(space->top());
  const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
  const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);

  // Skip full regions at the beginning of the space--they are necessarily part
  // of the dense prefix.
  size_t full_count = 0;
  const RegionData* cp;
  for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
    ++full_count;
  }

  assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
  const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
  const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
  if (maximum_compaction || cp == end_cp || interval_ended) {
    _maximum_compaction_gc_num = total_invocations();
    return sd.region_to_addr(cp);
  }

  HeapWord* const new_top = _space_info[id].new_top();
  const size_t space_live = pointer_delta(new_top, space->bottom());
  const size_t space_used = space->used_in_words();
  const size_t space_capacity = space->capacity_in_words();

  const double cur_density = double(space_live) / space_capacity;
  const double deadwood_density =
    (1.0 - cur_density) * (1.0 - cur_density) * cur_density * cur_density;
  const size_t deadwood_goal = size_t(space_capacity * deadwood_density);

  if (TraceParallelOldGCDensePrefix) {
    tty->print_cr("cur_dens=%5.3f dw_dens=%5.3f dw_goal=" SIZE_FORMAT,
                  cur_density, deadwood_density, deadwood_goal);
    tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
                  "space_cap=" SIZE_FORMAT,
                  space_live, space_used,
                  space_capacity);
  }

  // XXX - Use binary search?
  HeapWord* dense_prefix = sd.region_to_addr(cp);
  const RegionData* full_cp = cp;
  const RegionData* const top_cp = sd.addr_to_region_ptr(space->top() - 1);
  while (cp < end_cp) {
    HeapWord* region_destination = cp->destination();
    const size_t cur_deadwood = pointer_delta(dense_prefix, region_destination);
    if (TraceParallelOldGCDensePrefix && Verbose) {
      tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
                    "dp=" PTR_FORMAT " " "cdw=" SIZE_FORMAT_W(8),
                    sd.region(cp), p2i(region_destination),
                    p2i(dense_prefix), cur_deadwood);
    }

    if (cur_deadwood >= deadwood_goal) {
      // Found the region that has the correct amount of deadwood to the left.
      // This typically occurs after crossing a fairly sparse set of regions, so
      // iterate backwards over those sparse regions, looking for the region
      // that has the lowest density of live objects 'to the right.'
      size_t space_to_left = sd.region(cp) * region_size;
      size_t live_to_left = space_to_left - cur_deadwood;
      size_t space_to_right = space_capacity - space_to_left;
      size_t live_to_right = space_live - live_to_left;
      double density_to_right = double(live_to_right) / space_to_right;
      while (cp > full_cp) {
        --cp;
        const size_t prev_region_live_to_right = live_to_right -
          cp->data_size();
        const size_t prev_region_space_to_right = space_to_right + region_size;
        double prev_region_density_to_right =
          double(prev_region_live_to_right) / prev_region_space_to_right;
        if (density_to_right <= prev_region_density_to_right) {
          return dense_prefix;
        }
        if (TraceParallelOldGCDensePrefix && Verbose) {
          tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
                        "pc_d2r=%10.8f", sd.region(cp), density_to_right,
                        prev_region_density_to_right);
        }
        dense_prefix -= region_size;
        live_to_right = prev_region_live_to_right;
        space_to_right = prev_region_space_to_right;
        density_to_right = prev_region_density_to_right;
      }
      return dense_prefix;
    }

    dense_prefix += region_size;
    ++cp;
  }

  return dense_prefix;
}

#ifndef PRODUCT
void PSParallelCompact::print_dense_prefix_stats(const char* const algorithm,
                                                 const SpaceId id,
                                                 const bool maximum_compaction,
                                                 HeapWord* const addr)
{
  const size_t region_idx = summary_data().addr_to_region_idx(addr);
  RegionData* const cp = summary_data().region(region_idx);
  const MutableSpace* const space = _space_info[id].space();
  HeapWord* const new_top = _space_info[id].new_top();

  const size_t space_live = pointer_delta(new_top, space->bottom());
  const size_t dead_to_left = pointer_delta(addr, cp->destination());
  const size_t space_cap = space->capacity_in_words();
  const double dead_to_left_pct = double(dead_to_left) / space_cap;
  const size_t live_to_right = new_top - cp->destination();
  const size_t dead_to_right = space->top() - addr - live_to_right;

  tty->print_cr("%s=" PTR_FORMAT " dpc=" SIZE_FORMAT_W(5) " "
                "spl=" SIZE_FORMAT " "
                "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
                "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
                " ratio=%10.8f",
                algorithm, p2i(addr), region_idx,
                space_live,
                dead_to_left, dead_to_left_pct,
                dead_to_right, live_to_right,
                double(dead_to_right) / live_to_right);
}
#endif  // #ifndef PRODUCT

// Return a fraction indicating how much of the generation can be treated as
// "dead wood" (i.e., not reclaimed).  The function uses a normal distribution
// based on the density of live objects in the generation to determine a limit,
// which is then adjusted so the return value is min_percent when the density is
// 1.
//
// The following table shows some return values for different values of the
// standard deviation (ParallelOldDeadWoodLimiterStdDev); the mean is 0.5 and
// min_percent is 1.
//
//                          fraction allowed as dead wood
//         -----------------------------------------------------------------
// density std_dev=70 std_dev=75 std_dev=80 std_dev=85 std_dev=90 std_dev=95
// ------- ---------- ---------- ---------- ---------- ---------- ----------
// 0.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000
// 0.05000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
// 0.10000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
// 0.15000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
// 0.20000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
// 0.25000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
// 0.30000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
// 0.35000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
// 0.40000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
// 0.45000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
// 0.50000 0.13832410 0.11599237 0.09847664 0.08456518 0.07338887 0.06431510
// 0.55000 0.13687208 0.11481163 0.09750361 0.08375387 0.07270534 0.06373386
// 0.60000 0.13253818 0.11128511 0.09459590 0.08132834 0.07066107 0.06199500
// 0.65000 0.12538832 0.10545958 0.08978741 0.07731366 0.06727491 0.05911289
// 0.70000 0.11553050 0.09741183 0.08313394 0.07175114 0.06257797 0.05511132
// 0.75000 0.10311208 0.08724696 0.07471205 0.06469760 0.05661313 0.05002313
// 0.80000 0.08831616 0.07509618 0.06461766 0.05622444 0.04943437 0.04388975
// 0.85000 0.07135702 0.06111390 0.05296419 0.04641639 0.04110601 0.03676066
// 0.90000 0.05247504 0.04547452 0.03988045 0.03537016 0.03170171 0.02869272
// 0.95000 0.03193096 0.02836880 0.02550828 0.02319280 0.02130337 0.01974941
// 1.00000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000 0.01000000

double PSParallelCompact::dead_wood_limiter(double density, size_t min_percent)
{
  assert(_dwl_initialized, "uninitialized");

  // The raw limit is the value of the normal distribution at x = density.
  const double raw_limit = normal_distribution(density);

  // Adjust the raw limit so it becomes the minimum when the density is 1.
  //
  // First subtract the adjustment value (which is simply the precomputed value
  // normal_distribution(1.0)); this yields a value of 0 when the density is 1.
  // Then add the minimum value, so the minimum is returned when the density is
  // 1.  Finally, prevent negative values, which occur when the mean is not 0.5.
  const double min = double(min_percent) / 100.0;
  const double limit = raw_limit - _dwl_adjustment + min;
  return MAX2(limit, 0.0);
}
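
// Example, reading from the table above: with the default mean of 0.5, a
// standard deviation setting of 80 and min_percent == 1, a generation that is
// half live (density == 0.5) may keep roughly 0.0985 of its capacity as dead
// wood, while a nearly empty or nearly full generation keeps only the 0.01
// minimum.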
1226 
1227 ParallelCompactData::RegionData*
1228 PSParallelCompact::first_dead_space_region(const RegionData* beg,
1229                                            const RegionData* end)
1230 {
1231   const size_t region_size = ParallelCompactData::RegionSize;
1232   ParallelCompactData& sd = summary_data();
1233   size_t left = sd.region(beg);
1234   size_t right = end > beg ? sd.region(end) - 1 : left;
1235 
1236   // Binary search.
1237   while (left < right) {
1238     // Equivalent to (left + right) / 2, but does not overflow.
1239     const size_t middle = left + (right - left) / 2;
1240     RegionData* const middle_ptr = sd.region(middle);
1241     HeapWord* const dest = middle_ptr->destination();
1242     HeapWord* const addr = sd.region_to_addr(middle);
1243     assert(dest != NULL, "sanity");
1244     assert(dest <= addr, "must move left");
1245 
1246     if (middle > left && dest < addr) {
1247       right = middle - 1;
1248     } else if (middle < right && middle_ptr->data_size() == region_size) {
1249       left = middle + 1;
1250     } else {
1251       return middle_ptr;
1252     }
1253   }
1254   return sd.region(left);
1255 }
1256 
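// Find the region in [beg, end) with approximately dead_words of dead space
// to its left.  The amount of dead space to the left of a region never
// decreases when moving right during this phase, so a binary search applies.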
1257 ParallelCompactData::RegionData*
1258 PSParallelCompact::dead_wood_limit_region(const RegionData* beg,
1259                                           const RegionData* end,
1260                                           size_t dead_words)
1261 {
1262   ParallelCompactData& sd = summary_data();
1263   size_t left = sd.region(beg);
1264   size_t right = end > beg ? sd.region(end) - 1 : left;
1265 
1266   // Binary search.
1267   while (left < right) {
1268     // Equivalent to (left + right) / 2, but does not overflow.
1269     const size_t middle = left + (right - left) / 2;
1270     RegionData* const middle_ptr = sd.region(middle);
1271     HeapWord* const dest = middle_ptr->destination();
1272     HeapWord* const addr = sd.region_to_addr(middle);
1273     assert(dest != NULL, "sanity");
1274     assert(dest <= addr, "must move left");
1275 
1276     const size_t dead_to_left = pointer_delta(addr, dest);
1277     if (middle > left && dead_to_left > dead_words) {
1278       right = middle - 1;
1279     } else if (middle < right && dead_to_left < dead_words) {
1280       left = middle + 1;
1281     } else {
1282       return middle_ptr;
1283     }
1284   }
1285   return sd.region(left);
1286 }
1287 
1288 // The result is valid during the summary phase, after the initial summarization
1289 // of each space into itself, and before final summarization.
1290 inline double
1291 PSParallelCompact::reclaimed_ratio(const RegionData* const cp,
1292                                    HeapWord* const bottom,
1293                                    HeapWord* const top,
1294                                    HeapWord* const new_top)
1295 {
1296   ParallelCompactData& sd = summary_data();
1297 
1298   assert(cp != NULL, "sanity");
1299   assert(bottom != NULL, "sanity");
1300   assert(top != NULL, "sanity");
1301   assert(new_top != NULL, "sanity");
1302   assert(top >= new_top, "summary data problem?");
1303   assert(new_top > bottom, "space is empty; should not be here");
1304   assert(new_top >= cp->destination(), "sanity");
1305   assert(top >= sd.region_to_addr(cp), "sanity");
1306 
1307   HeapWord* const destination = cp->destination();
1308   const size_t dense_prefix_live  = pointer_delta(destination, bottom);
1309   const size_t compacted_region_live = pointer_delta(new_top, destination);
1310   const size_t compacted_region_used = pointer_delta(top,
1311                                                      sd.region_to_addr(cp));
1312   const size_t reclaimable = compacted_region_used - compacted_region_live;
1313 
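  // The denominator weights live words that must be copied (those in the
  // compacted region) more heavily than live words left in place in the
  // dense prefix; the 1.25 factor appears to be a tuning heuristic for the
  // relative cost of copying.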
1314   const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
1315   return double(reclaimable) / divisor;
1316 }
1317 
1318 // Return the address of the end of the dense prefix, a.k.a. the start of the
1319 // compacted region.  The address is always on a region boundary.
1320 //
1321 // Completely full regions at the left are skipped, since no compaction can
1322 // occur in those regions.  Then the maximum amount of dead wood to allow is
1323 // computed, based on the density (amount live / capacity) of the generation;
1324 // the region with approximately that amount of dead space to the left is
1325 // identified as the limit region.  Regions between the last completely full
1326 // region and the limit region are scanned and the one that has the best
1327 // (maximum) reclaimed_ratio() is selected.
1328 HeapWord*
1329 PSParallelCompact::compute_dense_prefix(const SpaceId id,
1330                                         bool maximum_compaction)
1331 {
1332   const size_t region_size = ParallelCompactData::RegionSize;
1333   const ParallelCompactData& sd = summary_data();
1334 
1335   const MutableSpace* const space = _space_info[id].space();
1336   HeapWord* const top = space->top();
1337   HeapWord* const top_aligned_up = sd.region_align_up(top);
1338   HeapWord* const new_top = _space_info[id].new_top();
1339   HeapWord* const new_top_aligned_up = sd.region_align_up(new_top);
1340   HeapWord* const bottom = space->bottom();
1341   const RegionData* const beg_cp = sd.addr_to_region_ptr(bottom);
1342   const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
1343   const RegionData* const new_top_cp =
1344     sd.addr_to_region_ptr(new_top_aligned_up);
1345 
1346   // Skip full regions at the beginning of the space--they are necessarily part
1347   // of the dense prefix.
1348   const RegionData* const full_cp = first_dead_space_region(beg_cp, new_top_cp);
1349   assert(full_cp->destination() == sd.region_to_addr(full_cp) ||
1350          space->is_empty(), "no dead space allowed to the left");
1351   assert(full_cp->data_size() < region_size || full_cp == new_top_cp - 1,
1352          "region must have dead space");
1353 
1354   // The gc number is saved whenever a maximum compaction is done, and used to
1355   // determine when the maximum compaction interval has expired.  This avoids
1356   // successive max compactions for different reasons.
1357   assert(total_invocations() >= _maximum_compaction_gc_num, "sanity");
1358   const size_t gcs_since_max = total_invocations() - _maximum_compaction_gc_num;
1359   const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval ||
1360     total_invocations() == HeapFirstMaximumCompactionCount;
1361   if (maximum_compaction || full_cp == top_cp || interval_ended) {
1362     _maximum_compaction_gc_num = total_invocations();
1363     return sd.region_to_addr(full_cp);
1364   }
1365 
1366   const size_t space_live = pointer_delta(new_top, bottom);
1367   const size_t space_used = space->used_in_words();
1368   const size_t space_capacity = space->capacity_in_words();
1369 
1370   const double density = double(space_live) / double(space_capacity);
1371   const size_t min_percent_free = MarkSweepDeadRatio;
1372   const double limiter = dead_wood_limiter(density, min_percent_free);
1373   const size_t dead_wood_max = space_used - space_live;
1374   const size_t dead_wood_limit = MIN2(size_t(space_capacity * limiter),
1375                                       dead_wood_max);
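
  // Illustrative numbers only: with space_capacity == 1,000,000 words and a
  // limiter value of 0.08, the dead wood limit is MIN2(80,000, dead_wood_max)
  // words of dead space allowed in the dense prefix.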
1376 
1377   if (TraceParallelOldGCDensePrefix) {
1378     tty->print_cr("space_live=" SIZE_FORMAT " " "space_used=" SIZE_FORMAT " "
1379                   "space_cap=" SIZE_FORMAT,
1380                   space_live, space_used,
1381                   space_capacity);
1382     tty->print_cr("dead_wood_limiter(%6.4f, " SIZE_FORMAT ")=%6.4f "
1383                   "dead_wood_max=" SIZE_FORMAT " dead_wood_limit=" SIZE_FORMAT,
1384                   density, min_percent_free, limiter,
1385                   dead_wood_max, dead_wood_limit);
1386   }
1387 
1388   // Locate the region with the desired amount of dead space to the left.
1389   const RegionData* const limit_cp =
1390     dead_wood_limit_region(full_cp, top_cp, dead_wood_limit);
1391 
1392   // Scan from the first region with dead space to the limit region and find the
1393   // one with the best (largest) reclaimed ratio.
1394   double best_ratio = 0.0;
1395   const RegionData* best_cp = full_cp;
1396   for (const RegionData* cp = full_cp; cp < limit_cp; ++cp) {
1397     double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
1398     if (tmp_ratio > best_ratio) {
1399       best_cp = cp;
1400       best_ratio = tmp_ratio;
1401     }
1402   }
1403 
1404   return sd.region_to_addr(best_cp);
1405 }
1406 
1407 void PSParallelCompact::summarize_spaces_quick()
1408 {
1409   for (unsigned int i = 0; i < last_space_id; ++i) {
1410     const MutableSpace* space = _space_info[i].space();
1411     HeapWord** nta = _space_info[i].new_top_addr();
1412     bool result = _summary_data.summarize(_space_info[i].split_info(),
1413                                           space->bottom(), space->top(), NULL,
1414                                           space->bottom(), space->end(), nta);
1415     assert(result, "space must fit into itself");
1416     _space_info[i].set_dense_prefix(space->bottom());
1417   }
1418 }
1419 
1420 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
1421 {
1422   HeapWord* const dense_prefix_end = dense_prefix(id);
1423   const RegionData* region = _summary_data.addr_to_region_ptr(dense_prefix_end);
1424   const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
1425   if (dead_space_crosses_boundary(region, dense_prefix_bit)) {
1426     // Only enough dead space is filled so that any remaining dead space to the
1427     // left is larger than the minimum filler object.  (The remainder is filled
1428     // during the copy/update phase.)
1429     //
1430     // The size of the dead space to the right of the boundary is not a
1431     // concern, since compaction will be able to use whatever space is
1432     // available.
1433     //
1434     // Here '||' is the boundary, 'x' represents a don't care bit and a box
1435     // surrounds the space to be filled with an object.
1436     //
1437     // In the 32-bit VM, each bit represents two 32-bit words:
1438     //                              +---+
1439     // a) beg_bits:  ...  x   x   x | 0 | ||   0   x  x  ...
1440     //    end_bits:  ...  x   x   x | 0 | ||   0   x  x  ...
1441     //                              +---+
1442     //
1443     // In the 64-bit VM, each bit represents one 64-bit word:
1444     //                              +------------+
1445     // b) beg_bits:  ...  x   x   x | 0   ||   0 | x  x  ...
1446     //    end_bits:  ...  x   x   1 | 0   ||   0 | x  x  ...
1447     //                              +------------+
1448     //                          +-------+
1449     // c) beg_bits:  ...  x   x | 0   0 | ||   0   x  x  ...
1450     //    end_bits:  ...  x   1 | 0   0 | ||   0   x  x  ...
1451     //                          +-------+
1452     //                      +-----------+
1453     // d) beg_bits:  ...  x | 0   0   0 | ||   0   x  x  ...
1454     //    end_bits:  ...  1 | 0   0   0 | ||   0   x  x  ...
1455     //                      +-----------+
1456     //                          +-------+
1457     // e) beg_bits:  ...  0   0 | 0   0 | ||   0   x  x  ...
1458     //    end_bits:  ...  0   0 | 0   0 | ||   0   x  x  ...
1459     //                          +-------+
1460 
1461     // Initially assume case a, c or e will apply.
1462     size_t obj_len = CollectedHeap::min_fill_size();
1463     HeapWord* obj_beg = dense_prefix_end - obj_len;
1464 
1465 #ifdef  _LP64
1466     if (MinObjAlignment > 1) { // object alignment > heap word size
1467       // Cases a, c or e.
1468     } else if (_mark_bitmap.is_obj_end(dense_prefix_bit - 2)) {
1469       // Case b above.
1470       obj_beg = dense_prefix_end - 1;
1471     } else if (!_mark_bitmap.is_obj_end(dense_prefix_bit - 3) &&
1472                _mark_bitmap.is_obj_end(dense_prefix_bit - 4)) {
1473       // Case d above.
1474       obj_beg = dense_prefix_end - 3;
1475       obj_len = 3;
1476     }
1477 #endif  // #ifdef _LP64
1478 
1479     CollectedHeap::fill_with_object(obj_beg, obj_len);
1480     _mark_bitmap.mark_obj(obj_beg, obj_len);
1481     _summary_data.add_obj(obj_beg, obj_len);
1482     assert(start_array(id) != NULL, "sanity");
1483     start_array(id)->allocate_block(obj_beg);
1484   }
1485 }
1486 
1487 void
1488 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
1489 {
1490   assert(id < last_space_id, "id out of range");
1491   assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
1492          "should have been reset in summarize_spaces_quick()");
1493 
1494   const MutableSpace* space = _space_info[id].space();
1495   if (_space_info[id].new_top() != space->bottom()) {
1496     HeapWord* dense_prefix_end = compute_dense_prefix(id, maximum_compaction);
1497     _space_info[id].set_dense_prefix(dense_prefix_end);
1498 
1499 #ifndef PRODUCT
1500     if (TraceParallelOldGCDensePrefix) {
1501       print_dense_prefix_stats("ratio", id, maximum_compaction,
1502                                dense_prefix_end);
1503       HeapWord* addr = compute_dense_prefix_via_density(id, maximum_compaction);
1504       print_dense_prefix_stats("density", id, maximum_compaction, addr);
1505     }
1506 #endif  // #ifndef PRODUCT
1507 
1508     // Recompute the summary data, taking into account the dense prefix.  If
1509     // every last byte will be reclaimed, then the existing summary data which
1510     // compacts everything can be left in place.
1511     if (!maximum_compaction && dense_prefix_end != space->bottom()) {
1512       // If dead space crosses the dense prefix boundary, it is (at least
1513       // partially) filled with a dummy object, marked live and added to the
1514       // summary data.  This simplifies the copy/update phase and must be done
1515       // before the final locations of objects are determined, to prevent
1516       // leaving a fragment of dead space that is too small to fill.
1517       fill_dense_prefix_end(id);
1518 
1519       // Compute the destination of each Region, and thus each object.
1520       _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
1521       _summary_data.summarize(_space_info[id].split_info(),
1522                               dense_prefix_end, space->top(), NULL,
1523                               dense_prefix_end, space->end(),
1524                               _space_info[id].new_top_addr());
1525     }
1526   }
1527 
1528   if (log_develop_is_enabled(Trace, gc, compaction)) {
1529     const size_t region_size = ParallelCompactData::RegionSize;
1530     HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
1531     const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
1532     const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
1533     HeapWord* const new_top = _space_info[id].new_top();
1534     const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
1535     const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
1536     log_develop_trace(gc, compaction)(
1537         "id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
1538         "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
1539         "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
1540         id, space->capacity_in_words(), p2i(dense_prefix_end),
1541         dp_region, dp_words / region_size,
1542         cr_words / region_size, p2i(new_top));
1543   }
1544 }
1545 
1546 #ifndef PRODUCT
1547 void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
1548                                           HeapWord* dst_beg, HeapWord* dst_end,
1549                                           SpaceId src_space_id,
1550                                           HeapWord* src_beg, HeapWord* src_end)
1551 {
1552   log_develop_trace(gc, compaction)(
1553       "Summarizing %d [%s] into %d [%s]:  "
1554       "src=" PTR_FORMAT "-" PTR_FORMAT " "
1555       SIZE_FORMAT "-" SIZE_FORMAT " "
1556       "dst=" PTR_FORMAT "-" PTR_FORMAT " "
1557       SIZE_FORMAT "-" SIZE_FORMAT,
1558       src_space_id, space_names[src_space_id],
1559       dst_space_id, space_names[dst_space_id],
1560       p2i(src_beg), p2i(src_end),
1561       _summary_data.addr_to_region_idx(src_beg),
1562       _summary_data.addr_to_region_idx(src_end),
1563       p2i(dst_beg), p2i(dst_end),
1564       _summary_data.addr_to_region_idx(dst_beg),
1565       _summary_data.addr_to_region_idx(dst_end));
1566 }
1567 #endif  // #ifndef PRODUCT
1568 
1569 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
1570                                       bool maximum_compaction)
1571 {
1572   GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);
1573 
1574 #ifdef  ASSERT
1575   if (TraceParallelOldGCMarkingPhase) {
1576     tty->print_cr("add_obj_count=" SIZE_FORMAT " "
1577                   "add_obj_bytes=" SIZE_FORMAT,
1578                   add_obj_count, add_obj_size * HeapWordSize);
1579     tty->print_cr("mark_bitmap_count=" SIZE_FORMAT " "
1580                   "mark_bitmap_bytes=" SIZE_FORMAT,
1581                   mark_bitmap_count, mark_bitmap_size * HeapWordSize);
1582   }
1583 #endif  // #ifdef ASSERT
1584 
1585   // Quick summarization of each space into itself, to see how much is live.
1586   summarize_spaces_quick();
1587 
1588   log_develop_trace(gc, compaction)("summary phase:  after summarizing each space to self");
1589   NOT_PRODUCT(print_region_ranges());
1590   NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1591 
1592   // The amount of live data that will end up in old space (assuming it fits).
1593   size_t old_space_total_live = 0;
1594   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1595     old_space_total_live += pointer_delta(_space_info[id].new_top(),
1596                                           _space_info[id].space()->bottom());
1597   }
1598 
1599   MutableSpace* const old_space = _space_info[old_space_id].space();
1600   const size_t old_capacity = old_space->capacity_in_words();
1601   if (old_space_total_live > old_capacity) {
1602     // XXX - should also try to expand
1603     maximum_compaction = true;
1604   }
1605 
1606   // Old generations.
1607   summarize_space(old_space_id, maximum_compaction);
1608 
1609   // Summarize the remaining spaces in the young gen.  The initial target space
1610   // is the old gen.  If a space does not fit entirely into the target, then the
1611   // remainder is compacted into the space itself and that space becomes the new
1612   // target.
1613   SpaceId dst_space_id = old_space_id;
1614   HeapWord* dst_space_end = old_space->end();
1615   HeapWord** new_top_addr = _space_info[dst_space_id].new_top_addr();
1616   for (unsigned int id = eden_space_id; id < last_space_id; ++id) {
1617     const MutableSpace* space = _space_info[id].space();
1618     const size_t live = pointer_delta(_space_info[id].new_top(),
1619                                       space->bottom());
1620     const size_t available = pointer_delta(dst_space_end, *new_top_addr);
1621 
1622     NOT_PRODUCT(summary_phase_msg(dst_space_id, *new_top_addr, dst_space_end,
1623                                   SpaceId(id), space->bottom(), space->top());)
1624     if (live > 0 && live <= available) {
1625       // All the live data will fit.
1626       bool done = _summary_data.summarize(_space_info[id].split_info(),
1627                                           space->bottom(), space->top(),
1628                                           NULL,
1629                                           *new_top_addr, dst_space_end,
1630                                           new_top_addr);
1631       assert(done, "space must fit into old gen");
1632 
1633       // Reset the new_top value for the space.
1634       _space_info[id].set_new_top(space->bottom());
1635     } else if (live > 0) {
1636       // Attempt to fit part of the source space into the target space.
1637       HeapWord* next_src_addr = NULL;
1638       bool done = _summary_data.summarize(_space_info[id].split_info(),
1639                                           space->bottom(), space->top(),
1640                                           &next_src_addr,
1641                                           *new_top_addr, dst_space_end,
1642                                           new_top_addr);
1643       assert(!done, "space should not fit into old gen");
1644       assert(next_src_addr != NULL, "sanity");
1645 
1646       // The source space becomes the new target, so the remainder is compacted
1647       // within the space itself.
1648       dst_space_id = SpaceId(id);
1649       dst_space_end = space->end();
1650       new_top_addr = _space_info[id].new_top_addr();
1651       NOT_PRODUCT(summary_phase_msg(dst_space_id,
1652                                     space->bottom(), dst_space_end,
1653                                     SpaceId(id), next_src_addr, space->top());)
1654       done = _summary_data.summarize(_space_info[id].split_info(),
1655                                      next_src_addr, space->top(),
1656                                      NULL,
1657                                      space->bottom(), dst_space_end,
1658                                      new_top_addr);
1659       assert(done, "space must fit when compacted into itself");
1660       assert(*new_top_addr <= space->top(), "usage should not grow");
1661     }
1662   }
1663 
1664   log_develop_trace(gc, compaction)("Summary_phase:  after final summarization");
1665   NOT_PRODUCT(print_region_ranges());
1666   NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
1667 }
1668 
1669 // This method should contain all heap-specific policy for invoking a full
1670 // collection.  invoke_no_policy() will only attempt to compact the heap; it
// will do nothing further.  If we need to bail out for policy reasons, scavenge
// before the full gc, or apply any other specialized behavior, that logic needs
// to be added here.
1673 //
1674 // Note that this method should only be called from the vm_thread while at a
1675 // safepoint.
1676 //
1677 // Note that the all_soft_refs_clear flag in the collector policy
1678 // may be true because this method can be called without intervening
// activity.  For example, when the heap space is tight and full measures are
// being taken to free space.
1681 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1682   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1683   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1684          "should be in vm thread");
1685 
1686   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1687   GCCause::Cause gc_cause = heap->gc_cause();
1688   assert(!heap->is_gc_active(), "not reentrant");
1689 
1690   PSAdaptiveSizePolicy* policy = heap->size_policy();
1691   IsGCActiveMark mark;
1692 
1693   if (ScavengeBeforeFullGC) {
1694     PSScavenge::invoke_no_policy();
1695   }
1696 
1697   const bool clear_all_soft_refs =
1698     heap->collector_policy()->should_clear_all_soft_refs();
1699 
1700   PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
1701                                       maximum_heap_compaction);
1702 }
1703 
1704 // This method contains no policy. You should probably
1705 // be calling invoke() instead.
1706 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1707   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1708   assert(ref_processor() != NULL, "Sanity");
1709 
1710   if (GCLocker::check_active_before_gc()) {
1711     return false;
1712   }
1713 
1714   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1715 
1716   GCIdMark gc_id_mark;
1717   _gc_timer.register_gc_start();
1718   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
1719 
1720   TimeStamp marking_start;
1721   TimeStamp compaction_start;
1722   TimeStamp collection_exit;
1723 
1724   GCCause::Cause gc_cause = heap->gc_cause();
1725   PSYoungGen* young_gen = heap->young_gen();
1726   PSOldGen* old_gen = heap->old_gen();
1727   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
1728 
1729   // The scope of casr should end after code that can change
1730   // CollectorPolicy::_should_clear_all_soft_refs.
1731   ClearedAllSoftRefs casr(maximum_heap_compaction,
1732                           heap->collector_policy());
1733 
1734   if (ZapUnusedHeapArea) {
1735     // Save information needed to minimize mangling
1736     heap->record_gen_tops_before_GC();
1737   }
1738 
1739   // Make sure data structures are sane, make the heap parsable, and do other
1740   // miscellaneous bookkeeping.
1741   pre_compact();
1742 
1743   PreGCValues pre_gc_values(heap);
1744 
1745   // Get the compaction manager reserved for the VM thread.
1746   ParCompactionManager* const vmthread_cm =
1747     ParCompactionManager::manager_array(gc_task_manager()->workers());
1748 
1749   {
1750     ResourceMark rm;
1751     HandleMark hm;
1752 
1753     // Set the number of GC threads to be used in this collection
1754     gc_task_manager()->set_active_gang();
1755     gc_task_manager()->task_idle_workers();
1756 
1757     GCTraceCPUTime tcpu;
1758     GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause, true);
1759 
1760     heap->pre_full_gc_dump(&_gc_timer);
1761 
1762     TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);
1764 
1765     if (TraceOldGenTime) accumulated_time()->start();
1766 
1767     // Let the size policy know we're starting
1768     size_policy->major_collection_begin();
1769 
1770     CodeCache::gc_prologue();
1771 
1772 #if defined(COMPILER2) || INCLUDE_JVMCI
1773     DerivedPointerTable::clear();
1774 #endif
1775 
1776     ref_processor()->enable_discovery();
1777     ref_processor()->setup_policy(maximum_heap_compaction);
1778 
1779     bool marked_for_unloading = false;
1780 
1781     marking_start.update();
1782     marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);
1783 
1784     bool max_on_system_gc = UseMaximumCompactionOnSystemGC
1785       && GCCause::is_user_requested_gc(gc_cause);
1786     summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
1787 
1788 #if defined(COMPILER2) || INCLUDE_JVMCI
1789     assert(DerivedPointerTable::is_active(), "Sanity");
1790     DerivedPointerTable::set_active(false);
1791 #endif
1792 
1793     // adjust_roots() updates Universe::_intArrayKlassObj which is
1794     // needed by the compaction for filling holes in the dense prefix.
1795     adjust_roots(vmthread_cm);
1796 
1797     compaction_start.update();
1798     compact();
1799 
1800     // Reset the mark bitmap, summary data, and do other bookkeeping.  Must be
1801     // done before resizing.
1802     post_compact();
1803 
1804     // Let the size policy know we're done
1805     size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
1806 
1807     if (UseAdaptiveSizePolicy) {
1808       log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
1809       log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
1810                           old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
1811 
1812       // Don't check if the size_policy is ready here.  Let
1813       // the size_policy check that internally.
1814       if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
1815           AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
1816         // Swap the survivor spaces if from_space is empty. The
1817         // resize_young_gen() called below is normally used after
1818         // a successful young GC and swapping of survivor spaces;
1819         // otherwise, it will fail to resize the young gen with
1820         // the current implementation.
1821         if (young_gen->from_space()->is_empty()) {
1822           young_gen->from_space()->clear(SpaceDecorator::Mangle);
1823           young_gen->swap_spaces();
1824         }
1825 
1826         // Calculate optimal free space amounts
1827         assert(young_gen->max_size() >
1828           young_gen->from_space()->capacity_in_bytes() +
1829           young_gen->to_space()->capacity_in_bytes(),
1830           "Sizes of space in young gen are out-of-bounds");
1831 
1832         size_t young_live = young_gen->used_in_bytes();
1833         size_t eden_live = young_gen->eden_space()->used_in_bytes();
1834         size_t old_live = old_gen->used_in_bytes();
1835         size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
1836         size_t max_old_gen_size = old_gen->max_gen_size();
1837         size_t max_eden_size = young_gen->max_size() -
1838           young_gen->from_space()->capacity_in_bytes() -
1839           young_gen->to_space()->capacity_in_bytes();
1840 
1841         // Used for diagnostics
1842         size_policy->clear_generation_free_space_flags();
1843 
1844         size_policy->compute_generations_free_space(young_live,
1845                                                     eden_live,
1846                                                     old_live,
1847                                                     cur_eden,
1848                                                     max_old_gen_size,
1849                                                     max_eden_size,
1850                                                     true /* full gc*/);
1851 
1852         size_policy->check_gc_overhead_limit(young_live,
1853                                              eden_live,
1854                                              max_old_gen_size,
1855                                              max_eden_size,
1856                                              true /* full gc*/,
1857                                              gc_cause,
1858                                              heap->collector_policy());
1859 
1860         size_policy->decay_supplemental_growth(true /* full gc*/);
1861 
1862         heap->resize_old_gen(
1863           size_policy->calculated_old_free_size_in_bytes());
1864 
1865         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
1866                                size_policy->calculated_survivor_size_in_bytes());
1867       }
1868 
1869       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
1870     }
1871 
1872     if (UsePerfData) {
1873       PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
1874       counters->update_counters();
1875       counters->update_old_capacity(old_gen->capacity_in_bytes());
1876       counters->update_young_capacity(young_gen->capacity_in_bytes());
1877     }
1878 
1879     heap->resize_all_tlabs();
1880 
1881     // Resize the metaspace capacity after a collection
1882     MetaspaceGC::compute_new_size();
1883 
1884     if (TraceOldGenTime) {
1885       accumulated_time()->stop();
1886     }
1887 
1888     young_gen->print_used_change(pre_gc_values.young_gen_used());
1889     old_gen->print_used_change(pre_gc_values.old_gen_used());
1890     MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());
1891 
1892     // Track memory usage and detect low memory
1893     MemoryService::track_memory_usage();
1894     heap->update_counters();
1895     gc_task_manager()->release_idle_workers();
1896 
1897     heap->post_full_gc_dump(&_gc_timer);
1898   }
1899 
1900 #ifdef ASSERT
1901   for (size_t i = 0; i < ParallelGCThreads + 1; ++i) {
1902     ParCompactionManager* const cm =
1903       ParCompactionManager::manager_array(int(i));
1904     assert(cm->marking_stack()->is_empty(),       "should be empty");
1905     assert(ParCompactionManager::region_list(int(i))->is_empty(), "should be empty");
1906   }
1907 #endif // ASSERT
1908 
1909   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
1910     HandleMark hm;  // Discard invalid handles created during verification
1911     Universe::verify("After GC");
1912   }
1913 
1914   // Re-verify object start arrays
  if (VerifyObjectStartArray && VerifyAfterGC) {
1917     old_gen->verify_object_start_array();
1918   }
1919 
1920   if (ZapUnusedHeapArea) {
1921     old_gen->object_space()->check_mangled_unused_area_complete();
1922   }
1923 
1924   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
1925 
1926   collection_exit.update();
1927 
1928   heap->print_heap_after_gc();
1929   heap->trace_heap_after_gc(&_gc_tracer);
1930 
1931   log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
1932                          marking_start.ticks(), compaction_start.ticks(),
1933                          collection_exit.ticks());
1934   gc_task_manager()->print_task_time_stamps();
1935 
1936 #ifdef TRACESPINNING
1937   ParallelTaskTerminator::print_termination_counts();
1938 #endif
1939 
1940   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
1941 
1942   _gc_timer.register_gc_end();
1943 
1944   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
1945   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
1946 
1947   return true;
1948 }
1949 
1950 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
1951                                              PSYoungGen* young_gen,
1952                                              PSOldGen* old_gen) {
1953   MutableSpace* const eden_space = young_gen->eden_space();
1954   assert(!eden_space->is_empty(), "eden must be non-empty");
1955   assert(young_gen->virtual_space()->alignment() ==
1956          old_gen->virtual_space()->alignment(), "alignments do not match");
1957 
1958   if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
1959     return false;
1960   }
1961 
1962   // Both generations must be completely committed.
1963   if (young_gen->virtual_space()->uncommitted_size() != 0) {
1964     return false;
1965   }
1966   if (old_gen->virtual_space()->uncommitted_size() != 0) {
1967     return false;
1968   }
1969 
1970   // Figure out how much to take from eden.  Include the average amount promoted
1971   // in the total; otherwise the next young gen GC will simply bail out to a
1972   // full GC.
1973   const size_t alignment = old_gen->virtual_space()->alignment();
1974   const size_t eden_used = eden_space->used_in_bytes();
1975   const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
1976   const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
1977   const size_t eden_capacity = eden_space->capacity_in_bytes();
1978 
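  // Illustrative numbers only: with eden_used == 30M bytes, an average padded
  // promotion of 2M and a 64K alignment, absorb_size is 32M; absorbing is
  // rejected below unless eden_capacity exceeds that and the shrunken young
  // gen stays at or above its minimum size.
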
1979   if (absorb_size >= eden_capacity) {
1980     return false; // Must leave some space in eden.
1981   }
1982 
1983   const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
1984   if (new_young_size < young_gen->min_gen_size()) {
1985     return false; // Respect young gen minimum size.
1986   }
1987 
1988   log_trace(heap, ergo)(" absorbing " SIZE_FORMAT "K:  "
1989                         "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
1990                         "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
1991                         "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
1992                         absorb_size / K,
1993                         eden_capacity / K, (eden_capacity - absorb_size) / K,
1994                         young_gen->from_space()->used_in_bytes() / K,
1995                         young_gen->to_space()->used_in_bytes() / K,
1996                         young_gen->capacity_in_bytes() / K, new_young_size / K);
1997 
1998   // Fill the unused part of the old gen.
1999   MutableSpace* const old_space = old_gen->object_space();
2000   HeapWord* const unused_start = old_space->top();
2001   size_t const unused_words = pointer_delta(old_space->end(), unused_start);
2002 
2003   if (unused_words > 0) {
2004     if (unused_words < CollectedHeap::min_fill_size()) {
2005       return false;  // If the old gen cannot be filled, must give up.
2006     }
2007     CollectedHeap::fill_with_objects(unused_start, unused_words);
2008   }
2009 
2010   // Take the live data from eden and set both top and end in the old gen to
2011   // eden top.  (Need to set end because reset_after_change() mangles the region
2012   // from end to virtual_space->high() in debug builds).
2013   HeapWord* const new_top = eden_space->top();
2014   old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
2015                                         absorb_size);
2016   young_gen->reset_after_change();
2017   old_space->set_top(new_top);
2018   old_space->set_end(new_top);
2019   old_gen->reset_after_change();
2020 
2021   // Update the object start array for the filler object and the data from eden.
2022   ObjectStartArray* const start_array = old_gen->start_array();
2023   for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
2024     start_array->allocate_block(p);
2025   }
2026 
2027   // Could update the promoted average here, but it is not typically updated at
2028   // full GCs and the value to use is unclear.  Something like
2029   //
2030   // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2031 
2032   size_policy->set_bytes_absorbed_from_eden(absorb_size);
2033   return true;
2034 }
2035 
2036 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2037   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2038     "shouldn't return NULL");
2039   return ParallelScavengeHeap::gc_task_manager();
2040 }
2041 
2042 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2043                                       bool maximum_heap_compaction,
                                      ParallelOldTracer* gc_tracer) {
2045   // Recursively traverse all live objects and mark them
2046   GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
2047 
2048   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2049   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2050   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2051   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2052   ParallelTaskTerminator terminator(active_gc_threads, qset);
2053 
2054   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
2055   ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
2056 
2057   // Need new claim bits before marking starts.
2058   ClassLoaderDataGraph::clear_claimed_marks();
2059 
2060   {
2061     GCTraceTime(Debug, gc, phases) tm("Par Mark", &_gc_timer);
2062 
2063     ParallelScavengeHeap::ParStrongRootsScope psrs;
2064 
2065     GCTaskQueue* q = GCTaskQueue::create();
2066 
2067     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2068     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2069     // We scan the thread roots in parallel
2070     Threads::create_thread_roots_marking_tasks(q);
2071     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
2072     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
2073     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
2074     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
2075     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
2076     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jvmti));
2077     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::code_cache));
2078 
2079     if (active_gc_threads > 1) {
2080       for (uint j = 0; j < active_gc_threads; j++) {
2081         q->enqueue(new StealMarkingTask(&terminator));
2082       }
2083     }
2084 
2085     gc_task_manager()->execute_and_wait(q);
2086   }
2087 
2088   // Process reference objects found during marking
2089   {
2090     GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
2091 
2092     ReferenceProcessorStats stats;
2093     if (ref_processor()->processing_is_mt()) {
2094       RefProcTaskExecutor task_executor;
2095       stats = ref_processor()->process_discovered_references(
2096         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
2097         &task_executor, &_gc_timer);
2098     } else {
2099       stats = ref_processor()->process_discovered_references(
2100         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
2101         &_gc_timer);
2102     }
2103 
2104     gc_tracer->report_gc_reference_stats(stats);
2105   }
2106 
2107   // This is the point where the entire marking should have completed.
2108   assert(cm->marking_stacks_empty(), "Marking should have completed");
2109 
2110   {
2111     GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);
2112 
2113     // Follow system dictionary roots and unload classes.
2114     bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
2115 
2116     // Unload nmethods.
2117     CodeCache::do_unloading(is_alive_closure(), purged_class);
2118 
2119     // Prune dead klasses from subklass/sibling/implementor lists.
2120     Klass::clean_weak_klass_links(is_alive_closure());
2121   }
2122 
2123   {
2124     GCTraceTime(Debug, gc, phases) t("Scrub String Table", &_gc_timer);
2125     // Delete entries for dead interned strings.
2126     StringTable::unlink(is_alive_closure());
2127   }
2128 
2129   {
2130     GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", &_gc_timer);
2131     // Clean up unreferenced symbols in symbol table.
2132     SymbolTable::unlink();
2133   }
2134 
2135   _gc_tracer.report_object_count_after_gc(is_alive_closure());
2136 }
2137 
2138 void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
2139   // Adjust the pointers to reflect the new locations
2140   GCTraceTime(Info, gc, phases) tm("Adjust Roots", &_gc_timer);
2141 
2142   // Need new claim bits when tracing through and adjusting pointers.
2143   ClassLoaderDataGraph::clear_claimed_marks();
2144 
2145   PSParallelCompact::AdjustPointerClosure oop_closure(cm);
2146   PSParallelCompact::AdjustKlassClosure klass_closure(cm);
2147 
2148   // General strong roots.
2149   Universe::oops_do(&oop_closure);
2150   JNIHandles::oops_do(&oop_closure);   // Global (strong) JNI handles
2151   CLDToOopClosure adjust_from_cld(&oop_closure);
2152   Threads::oops_do(&oop_closure, &adjust_from_cld, NULL);
2153   ObjectSynchronizer::oops_do(&oop_closure);
2154   FlatProfiler::oops_do(&oop_closure);
2155   Management::oops_do(&oop_closure);
2156   JvmtiExport::oops_do(&oop_closure);
2157   SystemDictionary::oops_do(&oop_closure);
2158   ClassLoaderDataGraph::oops_do(&oop_closure, &klass_closure, true);
2159 
2160   // Now adjust pointers in remaining weak roots.  (All of which should
2161   // have been cleared if they pointed to non-surviving objects.)
2162   // Global (weak) JNI handles
2163   JNIHandles::weak_oops_do(&oop_closure);
2164 
2165   CodeBlobToOopClosure adjust_from_blobs(&oop_closure, CodeBlobToOopClosure::FixRelocations);
2166   CodeCache::blobs_do(&adjust_from_blobs);
2167   StringTable::oops_do(&oop_closure);
2168   ref_processor()->weak_oops_do(&oop_closure);
2169   // Roots were visited so references into the young gen in roots
2170   // may have been scanned.  Process them also.
2171   // Should the reference processor have a span that excludes
2172   // young gen objects?
2173   PSScavenge::reference_processor()->weak_oops_do(&oop_closure);
2174 }
2175 
2176 // Helper class to print 8 region numbers per line and then print the total at the end.
2177 class FillableRegionLogger : public StackObj {
2178 private:
2179   Log(gc, compaction) log;
2180   static const int LineLength = 8;
2181   size_t _regions[LineLength];
2182   int _next_index;
2183   bool _enabled;
2184   size_t _total_regions;
2185 public:
2186   FillableRegionLogger() : _next_index(0), _total_regions(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)) { }
2187   ~FillableRegionLogger() {
2188     log.trace(SIZE_FORMAT " initially fillable regions", _total_regions);
2189   }
2190 
2191   void print_line() {
2192     if (!_enabled || _next_index == 0) {
2193       return;
2194     }
2195     FormatBuffer<> line("Fillable: ");
2196     for (int i = 0; i < _next_index; i++) {
2197       line.append(" " SIZE_FORMAT_W(7), _regions[i]);
2198     }
2199     log.trace("%s", line.buffer());
2200     _next_index = 0;
2201   }
2202 
2203   void handle(size_t region) {
2204     if (!_enabled) {
2205       return;
2206     }
2207     _regions[_next_index++] = region;
2208     if (_next_index == LineLength) {
2209       print_line();
2210     }
2211     _total_regions++;
2212   }
2213 };
2214 
2215 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
2216                                                       uint parallel_gc_threads)
2217 {
2218   GCTraceTime(Trace, gc, phases) tm("Drain Task Setup", &_gc_timer);
2219 
2220   // Find the threads that are active
2221   unsigned int which = 0;
2222 
2223   const uint task_count = MAX2(parallel_gc_threads, 1U);
2224   for (uint j = 0; j < task_count; j++) {
2225     q->enqueue(new DrainStacksCompactionTask(j));
2226     ParCompactionManager::verify_region_list_empty(j);
    // Set the region stack fields to "no region stack" values so that, in
    // the stealing tasks, managers that did not acquire a region stack by
    // executing a draining task are recognized as still needing one.
2231     ParCompactionManager* cm = ParCompactionManager::manager_array(j);
2232     cm->set_region_stack(NULL);
2233     cm->set_region_stack_index((uint)max_uintx);
2234   }
2235   ParCompactionManager::reset_recycled_stack_index();
2236 
2237   // Find all regions that are available (can be filled immediately) and
2238   // distribute them to the thread stacks.  The iteration is done in reverse
2239   // order (high to low) so the regions will be removed in ascending order.
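  // (The region lists are consumed last-in, first-out; pushing indices from
  // high to low therefore yields ascending order when they are popped and
  // filled.)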
2240 
2241   const ParallelCompactData& sd = PSParallelCompact::summary_data();
2242 
  // A region index corresponding to one of the tasks created above;
  // "which" must satisfy 0 <= which < task_count.
2245 
2246   which = 0;
  // id + 1 is used to test termination so that an unsigned loop variable can
  // be used with old_space_id == 0.
2249   FillableRegionLogger region_logger;
2250   for (unsigned int id = to_space_id; id + 1 > old_space_id; --id) {
2251     SpaceInfo* const space_info = _space_info + id;
2252     MutableSpace* const space = space_info->space();
2253     HeapWord* const new_top = space_info->new_top();
2254 
2255     const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
2256     const size_t end_region =
2257       sd.addr_to_region_idx(sd.region_align_up(new_top));
2258 
2259     for (size_t cur = end_region - 1; cur + 1 > beg_region; --cur) {
2260       if (sd.region(cur)->claim_unsafe()) {
2261         ParCompactionManager::region_list_push(which, cur);
2262         region_logger.handle(cur);
2263         // Assign regions to tasks in round-robin fashion.
2264         if (++which == task_count) {
2265           assert(which <= parallel_gc_threads,
2266             "Inconsistent number of workers");
2267           which = 0;
2268         }
2269       }
2270     }
2271     region_logger.print_line();
2272   }
2273 }
2274 
2275 #define PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING 4
2276 
2277 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
2278                                                     uint parallel_gc_threads) {
2279   GCTraceTime(Trace, gc, phases) tm("Dense Prefix Task Setup", &_gc_timer);
2280 
2281   ParallelCompactData& sd = PSParallelCompact::summary_data();
2282 
2283   // Iterate over all the spaces adding tasks for updating
2284   // regions in the dense prefix.  Assume that 1 gc thread
2285   // will work on opening the gaps and the remaining gc threads
2286   // will work on the dense prefix.
2287   unsigned int space_id;
  for (space_id = old_space_id; space_id < last_space_id; ++space_id) {
2289     HeapWord* const dense_prefix_end = _space_info[space_id].dense_prefix();
2290     const MutableSpace* const space = _space_info[space_id].space();
2291 
2292     if (dense_prefix_end == space->bottom()) {
2293       // There is no dense prefix for this space.
2294       continue;
2295     }
2296 
2297     // The dense prefix is before this region.
2298     size_t region_index_end_dense_prefix =
2299         sd.addr_to_region_idx(dense_prefix_end);
2300     RegionData* const dense_prefix_cp =
2301       sd.region(region_index_end_dense_prefix);
2302     assert(dense_prefix_end == space->end() ||
2303            dense_prefix_cp->available() ||
2304            dense_prefix_cp->claimed(),
2305            "The region after the dense prefix should always be ready to fill");
2306 
2307     size_t region_index_start = sd.addr_to_region_idx(space->bottom());
2308 
2309     // Is there dense prefix work?
2310     size_t total_dense_prefix_regions =
2311       region_index_end_dense_prefix - region_index_start;
2312     // How many regions of the dense prefix should be given to
2313     // each thread?
2314     if (total_dense_prefix_regions > 0) {
2315       uint tasks_for_dense_prefix = 1;
2316       if (total_dense_prefix_regions <=
2317           (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
        // Don't over-partition.  This assumes that
2319         // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
2320         // so there are not many regions to process.
2321         tasks_for_dense_prefix = parallel_gc_threads;
2322       } else {
        // Over-partition
2324         tasks_for_dense_prefix = parallel_gc_threads *
2325           PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
2326       }
2327       size_t regions_per_thread = total_dense_prefix_regions /
2328         tasks_for_dense_prefix;
2329       // Give each thread at least 1 region.
2330       if (regions_per_thread == 0) {
2331         regions_per_thread = 1;
2332       }
2333 
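      // Illustrative numbers only: with 4 gc threads and 100 dense prefix
      // regions, 100 > 4 * 4, so tasks_for_dense_prefix is 16 and
      // regions_per_thread is 100 / 16 = 6.  The loop below enqueues 16 tasks
      // covering 96 regions; the remaining 4 are enqueued as one task after
      // the loop.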
2334       for (uint k = 0; k < tasks_for_dense_prefix; k++) {
2335         if (region_index_start >= region_index_end_dense_prefix) {
2336           break;
2337         }
2338         // region_index_end is not processed
2339         size_t region_index_end = MIN2(region_index_start + regions_per_thread,
2340                                        region_index_end_dense_prefix);
2341         q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2342                                              region_index_start,
2343                                              region_index_end));
2344         region_index_start = region_index_end;
2345       }
2346     }
2347     // This gets any part of the dense prefix that did not
2348     // fit evenly.
2349     if (region_index_start < region_index_end_dense_prefix) {
2350       q->enqueue(new UpdateDensePrefixTask(SpaceId(space_id),
2351                                            region_index_start,
2352                                            region_index_end_dense_prefix));
2353     }
2354   }
2355 }
2356 
2357 void PSParallelCompact::enqueue_region_stealing_tasks(
2358                                      GCTaskQueue* q,
2359                                      ParallelTaskTerminator* terminator_ptr,
2360                                      uint parallel_gc_threads) {
2361   GCTraceTime(Trace, gc, phases) tm("Steal Task Setup", &_gc_timer);
2362 
  // Once a thread has drained its stack, it should try to steal regions from
2364   // other threads.
2365   if (parallel_gc_threads > 1) {
2366     for (uint j = 0; j < parallel_gc_threads; j++) {
2367       q->enqueue(new StealRegionCompactionTask(terminator_ptr));
2368     }
2369   }
2370 }
2371 
2372 #ifdef ASSERT
2373 // Write a histogram of the number of times the block table was filled for a
2374 // region.
2375 void PSParallelCompact::write_block_fill_histogram()
2376 {
2377   if (!log_develop_is_enabled(Trace, gc, compaction)) {
2378     return;
2379   }
2380 
2381   Log(gc, compaction) log;
2382   ResourceMark rm;
2383   outputStream* out = log.trace_stream();
2384 
2385   typedef ParallelCompactData::RegionData rd_t;
2386   ParallelCompactData& sd = summary_data();
2387 
2388   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2389     MutableSpace* const spc = _space_info[id].space();
2390     if (spc->bottom() != spc->top()) {
2391       const rd_t* const beg = sd.addr_to_region_ptr(spc->bottom());
2392       HeapWord* const top_aligned_up = sd.region_align_up(spc->top());
2393       const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2394 
2395       size_t histo[5] = { 0, 0, 0, 0, 0 };
2396       const size_t histo_len = sizeof(histo) / sizeof(size_t);
2397       const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2398 
2399       for (const rd_t* cur = beg; cur < end; ++cur) {
2400         ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2401       }
2402       out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2403       for (size_t i = 0; i < histo_len; ++i) {
2404         out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
2405                    histo[i], 100.0 * histo[i] / region_cnt);
2406       }
2407       out->cr();
2408     }
2409   }
2410 }
2411 #endif // #ifdef ASSERT
2412 
2413 void PSParallelCompact::compact() {
2414   GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
2415 
2416   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2417   PSOldGen* old_gen = heap->old_gen();
2418   old_gen->start_array()->reset();
2419   uint parallel_gc_threads = heap->gc_task_manager()->workers();
2420   uint active_gc_threads = heap->gc_task_manager()->active_workers();
2421   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2422   ParallelTaskTerminator terminator(active_gc_threads, qset);
2423 
2424   GCTaskQueue* q = GCTaskQueue::create();
2425   enqueue_region_draining_tasks(q, active_gc_threads);
2426   enqueue_dense_prefix_tasks(q, active_gc_threads);
2427   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
2428 
2429   {
2430     GCTraceTime(Trace, gc, phases) tm("Par Compact", &_gc_timer);
2431 
2432     gc_task_manager()->execute_and_wait(q);
2433 
2434 #ifdef  ASSERT
2435     // Verify that all regions have been processed before the deferred updates.
2436     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2437       verify_complete(SpaceId(id));
2438     }
2439 #endif
2440   }
2441 
2442   {
2443     // Update the deferred objects, if any.  Any compaction manager can be used.
2444     GCTraceTime(Trace, gc, phases) tm("Deferred Updates", &_gc_timer);
2445     ParCompactionManager* cm = ParCompactionManager::manager_array(0);
2446     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2447       update_deferred_objects(cm, SpaceId(id));
2448     }
2449   }
2450 
2451   DEBUG_ONLY(write_block_fill_histogram());
2452 }
2453 
2454 #ifdef  ASSERT
2455 void PSParallelCompact::verify_complete(SpaceId space_id) {
  // All Regions between space bottom() and new_top() should be marked as filled
2457   // and all Regions between new_top() and top() should be available (i.e.,
2458   // should have been emptied).
2459   ParallelCompactData& sd = summary_data();
2460   SpaceInfo si = _space_info[space_id];
2461   HeapWord* new_top_addr = sd.region_align_up(si.new_top());
2462   HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
2463   const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
2464   const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
2465   const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
2466 
2467   bool issued_a_warning = false;
2468 
2469   size_t cur_region;
2470   for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
2471     const RegionData* const c = sd.region(cur_region);
2472     if (!c->completed()) {
2473       log_warning(gc)("region " SIZE_FORMAT " not filled: destination_count=%u",
2474                       cur_region, c->destination_count());
2475       issued_a_warning = true;
2476     }
2477   }
2478 
2479   for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
2480     const RegionData* const c = sd.region(cur_region);
2481     if (!c->available()) {
2482       log_warning(gc)("region " SIZE_FORMAT " not empty: destination_count=%u",
2483                       cur_region, c->destination_count());
2484       issued_a_warning = true;
2485     }
2486   }
2487 
2488   if (issued_a_warning) {
2489     print_region_ranges();
2490   }
2491 }
2492 #endif  // #ifdef ASSERT
2493 
2494 inline void UpdateOnlyClosure::do_addr(HeapWord* addr) {
2495   _start_array->allocate_block(addr);
2496   compaction_manager()->update_contents(oop(addr));
2497 }
2498 
// Update interior oops in the range of regions [beg_region, end_region).
2500 void
2501 PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
2502                                                        SpaceId space_id,
2503                                                        size_t beg_region,
2504                                                        size_t end_region) {
2505   ParallelCompactData& sd = summary_data();
2506   ParMarkBitMap* const mbm = mark_bitmap();
2507 
2508   HeapWord* beg_addr = sd.region_to_addr(beg_region);
2509   HeapWord* const end_addr = sd.region_to_addr(end_region);
2510   assert(beg_region <= end_region, "bad region range");
2511   assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
2512 
2513 #ifdef  ASSERT
2514   // Claim the regions to avoid triggering an assert when they are marked as
2515   // filled.
2516   for (size_t claim_region = beg_region; claim_region < end_region; ++claim_region) {
2517     assert(sd.region(claim_region)->claim_unsafe(), "claim() failed");
2518   }
2519 #endif  // #ifdef ASSERT
2520 
2521   if (beg_addr != space(space_id)->bottom()) {
2522     // Find the first live object or block of dead space that *starts* in this
2523     // range of regions.  If a partial object crosses onto the region, skip it;
2524     // it will be marked for 'deferred update' when the object head is
2525     // processed.  If dead space crosses onto the region, it is also skipped; it
2526     // will be filled when the prior region is processed.  If neither of those
2527     // apply, the first word in the region is the start of a live object or dead
2528     // space.
2529     assert(beg_addr > space(space_id)->bottom(), "sanity");
2530     const RegionData* const cp = sd.region(beg_region);
2531     if (cp->partial_obj_size() != 0) {
2532       beg_addr = sd.partial_obj_end(beg_region);
2533     } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
2534       beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
2535     }
2536   }
2537 
2538   if (beg_addr < end_addr) {
2539     // A live object or block of dead space starts in this range of regions.
2540     HeapWord* const dense_prefix_end = dense_prefix(space_id);
2541 
2542     // Create closures and iterate.
2543     UpdateOnlyClosure update_closure(mbm, cm, space_id);
2544     FillClosure fill_closure(cm, space_id);
2545     ParMarkBitMap::IterationStatus status;
2546     status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
2547                           dense_prefix_end);
2548     if (status == ParMarkBitMap::incomplete) {
2549       update_closure.do_addr(update_closure.source());
2550     }
2551   }
2552 
2553   // Mark the regions as filled.
2554   RegionData* const beg_cp = sd.region(beg_region);
2555   RegionData* const end_cp = sd.region(end_region);
2556   for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
2557     cp->set_completed();
2558   }
2559 }
2560 
2561 // Return the SpaceId for the space containing addr.  If addr is not in the
2562 // heap, last_space_id is returned.  In debug builds the address is expected
2563 // to be in the heap, and this is enforced with an assert.
2564 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
2565   assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
2566 
2567   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2568     if (_space_info[id].space()->contains(addr)) {
2569       return SpaceId(id);
2570     }
2571   }
2572 
2573   assert(false, "no space contains the addr");
2574   return last_space_id;
2575 }
2576 
2577 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
2578                                                 SpaceId id) {
2579   assert(id < last_space_id, "bad space id");
2580 
2581   ParallelCompactData& sd = summary_data();
2582   const SpaceInfo* const space_info = _space_info + id;
2583   ObjectStartArray* const start_array = space_info->start_array();
2584 
2585   const MutableSpace* const space = space_info->space();
2586   assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
2587   HeapWord* const beg_addr = space_info->dense_prefix();
2588   HeapWord* const end_addr = sd.region_align_up(space_info->new_top());
2589 
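  // An object whose interior oop updates were deferred is one that was copied
  // across a destination region boundary; the region records the object's new
  // address in deferred_obj_addr() so it can be updated here, after all of its
  // words have been moved.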
2590   const RegionData* const beg_region = sd.addr_to_region_ptr(beg_addr);
2591   const RegionData* const end_region = sd.addr_to_region_ptr(end_addr);
2592   const RegionData* cur_region;
2593   for (cur_region = beg_region; cur_region < end_region; ++cur_region) {
2594     HeapWord* const addr = cur_region->deferred_obj_addr();
2595     if (addr != NULL) {
2596       if (start_array != NULL) {
2597         start_array->allocate_block(addr);
2598       }
2599       cm->update_contents(oop(addr));
2600       assert(oop(addr)->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(oop(addr)));
2601     }
2602   }
2603 }
2604 
2605 // Skip over count live words starting from beg, and return the address of the
2606 // next live word.  Unless marked, the word corresponding to beg is assumed to
2607 // be dead.  Callers must either ensure beg does not correspond to the middle of
2608 // an object, or account for those live words in some other way.  Callers must
2609 // also ensure that there are enough live words in the range [beg, end) to skip.
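// A worked example (assuming, for simplicity, one bitmap bit per heap word):
// if [beg, end) contains live objects of 3, 5 and 4 words separated by dead
// space, skip_live_words(beg, end, 7) skips all of the first object plus 4
// words of the second, returning the address 4 words past the second object's
// start.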
2610 HeapWord*
2611 PSParallelCompact::skip_live_words(HeapWord* beg, HeapWord* end, size_t count)
2612 {
2613   assert(count > 0, "sanity");
2614 
2615   ParMarkBitMap* m = mark_bitmap();
2616   idx_t bits_to_skip = m->words_to_bits(count);
2617   idx_t cur_beg = m->addr_to_bit(beg);
2618   const idx_t search_end = BitMap::word_align_up(m->addr_to_bit(end));
2619 
2620   do {
2621     cur_beg = m->find_obj_beg(cur_beg, search_end);
2622     idx_t cur_end = m->find_obj_end(cur_beg, search_end);
2623     const size_t obj_bits = cur_end - cur_beg + 1;
2624     if (obj_bits > bits_to_skip) {
2625       return m->bit_to_addr(cur_beg + bits_to_skip);
2626     }
2627     bits_to_skip -= obj_bits;
2628     cur_beg = cur_end + 1;
2629   } while (bits_to_skip > 0);
2630 
2631   // Skipping the desired number of words landed just past the end of an object.
2632   // Find the start of the next object.
2633   cur_beg = m->find_obj_beg(cur_beg, search_end);
2634   assert(cur_beg < m->addr_to_bit(end), "not enough live words to skip");
2635   return m->bit_to_addr(cur_beg);
2636 }
2637 
2638 HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
2639                                             SpaceId src_space_id,
2640                                             size_t src_region_idx)
2641 {
2642   assert(summary_data().is_region_aligned(dest_addr), "not aligned");
2643 
2644   const SplitInfo& split_info = _space_info[src_space_id].split_info();
2645   if (split_info.dest_region_addr() == dest_addr) {
2646     // The partial object ending at the split point contains the first word to
2647     // be copied to dest_addr.
2648     return split_info.first_src_addr();
2649   }
2650 
2651   const ParallelCompactData& sd = summary_data();
2652   ParMarkBitMap* const bitmap = mark_bitmap();
2653   const size_t RegionSize = ParallelCompactData::RegionSize;
2654 
2655   assert(sd.is_region_aligned(dest_addr), "not aligned");
2656   const RegionData* const src_region_ptr = sd.region(src_region_idx);
2657   const size_t partial_obj_size = src_region_ptr->partial_obj_size();
2658   HeapWord* const src_region_destination = src_region_ptr->destination();
2659 
2660   assert(dest_addr >= src_region_destination, "wrong src region");
2661   assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
2662 
2663   HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
2664   HeapWord* const src_region_end = src_region_beg + RegionSize;
2665 
2666   HeapWord* addr = src_region_beg;
2667   if (dest_addr == src_region_destination) {
2668     // Return the first live word in the source region.
2669     if (partial_obj_size == 0) {
2670       addr = bitmap->find_obj_beg(addr, src_region_end);
2671       assert(addr < src_region_end, "no objects start in src region");
2672     }
2673     return addr;
2674   }
2675 
2676   // Must skip some live data.
2677   size_t words_to_skip = dest_addr - src_region_destination;
2678   assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
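  // words_to_skip is the number of this source region's live words that were
  // copied to destinations preceding dest_addr; the first word destined for
  // dest_addr lies just past them.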
2679 
2680   if (partial_obj_size >= words_to_skip) {
2681     // All the live words to skip are part of the partial object.
2682     addr += words_to_skip;
2683     if (partial_obj_size == words_to_skip) {
2684       // Find the first live word past the partial object.
2685       addr = bitmap->find_obj_beg(addr, src_region_end);
2686       assert(addr < src_region_end, "wrong src region");
2687     }
2688     return addr;
2689   }
2690 
2691   // Skip over the partial object (if any).
2692   if (partial_obj_size != 0) {
2693     words_to_skip -= partial_obj_size;
2694     addr += partial_obj_size;
2695   }
2696 
2697   // Skip over live words due to objects that start in the region.
2698   addr = skip_live_words(addr, src_region_end, words_to_skip);
2699   assert(addr < src_region_end, "wrong src region");
2700   return addr;
2701 }
2702 
2703 void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
2704                                                      SpaceId src_space_id,
2705                                                      size_t beg_region,
2706                                                      HeapWord* end_addr)
2707 {
2708   ParallelCompactData& sd = summary_data();
2709 
2710 #ifdef ASSERT
2711   MutableSpace* const src_space = _space_info[src_space_id].space();
2712   HeapWord* const beg_addr = sd.region_to_addr(beg_region);
2713   assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
2714          "src_space_id does not match beg_addr");
2715   assert(src_space->contains(end_addr) || end_addr == src_space->end(),
2716          "src_space_id does not match end_addr");
2717 #endif // #ifdef ASSERT
2718 
2719   RegionData* const beg = sd.region(beg_region);
2720   RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
2721 
2722   // Regions up to new_top() are enqueued if they become available.
2723   HeapWord* const new_top = _space_info[src_space_id].new_top();
2724   RegionData* const enqueue_end =
2725     sd.addr_to_region_ptr(sd.region_align_up(new_top));
2726 
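  // Each copied-from region now has one fewer destination still needing its
  // data.  A region whose count drops to zero (and which lies below new_top)
  // becomes available, and claim() ensures exactly one thread enqueues it.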
2727   for (RegionData* cur = beg; cur < end; ++cur) {
2728     assert(cur->data_size() > 0, "region must have live data");
2729     cur->decrement_destination_count();
2730     if (cur < enqueue_end && cur->available() && cur->claim()) {
2731       cm->push_region(sd.region(cur));
2732     }
2733   }
2734 }
2735 
2736 size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
2737                                           SpaceId& src_space_id,
2738                                           HeapWord*& src_space_top,
2739                                           HeapWord* end_addr)
2740 {
2741   typedef ParallelCompactData::RegionData RegionData;
2742 
2743   ParallelCompactData& sd = PSParallelCompact::summary_data();
2744   const size_t region_size = ParallelCompactData::RegionSize;
2745 
2746   size_t src_region_idx = 0;
2747 
2748   // Skip empty regions (if any) up to the top of the space.
2749   HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
2750   RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
2751   HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
2752   const RegionData* const top_region_ptr =
2753     sd.addr_to_region_ptr(top_aligned_up);
2754   while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
2755     ++src_region_ptr;
2756   }
2757 
2758   if (src_region_ptr < top_region_ptr) {
2759     // The next source region is in the current space.  Update src_region_idx
2760     // and the source address to match src_region_ptr.
2761     src_region_idx = sd.region(src_region_ptr);
2762     HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
2763     if (src_region_addr > closure.source()) {
2764       closure.set_source(src_region_addr);
2765     }
2766     return src_region_idx;
2767   }
2768 
2769   // Switch to a new source space and find the first non-empty region.
2770   unsigned int space_id = src_space_id + 1;
2771   assert(space_id < last_space_id, "not enough spaces");
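  // Spaces are compacted in SpaceId order (old, eden, from, to), so the next
  // candidate source space is simply the next id.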
2772 
2773   HeapWord* const destination = closure.destination();
2774 
2775   do {
2776     MutableSpace* space = _space_info[space_id].space();
2777     HeapWord* const bottom = space->bottom();
2778     const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);
2779 
2780     // Iterate over the spaces that do not compact into themselves.
2781     if (bottom_cp->destination() != bottom) {
2782       HeapWord* const top_aligned_up = sd.region_align_up(space->top());
2783       const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
2784 
2785       for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
2786         if (src_cp->live_obj_size() > 0) {
2787           // Found it.
2788           assert(src_cp->destination() == destination,
2789                  "first live obj in the space must match the destination");
2790           assert(src_cp->partial_obj_size() == 0,
2791                  "a space cannot begin with a partial obj");
2792 
2793           src_space_id = SpaceId(space_id);
2794           src_space_top = space->top();
2795           const size_t src_region_idx = sd.region(src_cp);
2796           closure.set_source(sd.region_to_addr(src_region_idx));
2797           return src_region_idx;
2798         } else {
2799           assert(src_cp->data_size() == 0, "sanity");
2800         }
2801       }
2802     }
2803   } while (++space_id < last_space_id);
2804 
2805   assert(false, "no source region was found");
2806   return 0;
2807 }
2808 
2809 void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
2810 {
2811   typedef ParMarkBitMap::IterationStatus IterationStatus;
2812   const size_t RegionSize = ParallelCompactData::RegionSize;
2813   ParMarkBitMap* const bitmap = mark_bitmap();
2814   ParallelCompactData& sd = summary_data();
2815   RegionData* const region_ptr = sd.region(region_idx);
2816 
2817   // Get the items needed to construct the closure.
2818   HeapWord* dest_addr = sd.region_to_addr(region_idx);
2819   SpaceId dest_space_id = space_id(dest_addr);
2820   ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
2821   HeapWord* new_top = _space_info[dest_space_id].new_top();
2822   assert(dest_addr < new_top, "sanity");
2823   const size_t words = MIN2(pointer_delta(new_top, dest_addr), RegionSize);
2824 
2825   // Get the source region and related info.
2826   size_t src_region_idx = region_ptr->source_region();
2827   SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
2828   HeapWord* src_space_top = _space_info[src_space_id].space()->top();
2829 
2830   MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
2831   closure.set_source(first_src_addr(dest_addr, src_space_id, src_region_idx));
2832 
2833   // Adjust src_region_idx to prepare for decrementing destination counts (the
2834   // destination count is not decremented when a region is copied to itself).
2835   if (src_region_idx == region_idx) {
2836     src_region_idx += 1;
2837   }
2838 
2839   if (bitmap->is_unmarked(closure.source())) {
2840     // The first source word is in the middle of an object; copy the remainder
2841     // of the object or as much as will fit.  The fact that pointer updates were
2842     // deferred will be noted when the object header is processed.
2843     HeapWord* const old_src_addr = closure.source();
2844     closure.copy_partial_obj();
2845     if (closure.is_full()) {
2846       decrement_destination_counts(cm, src_space_id, src_region_idx,
2847                                    closure.source());
2848       region_ptr->set_deferred_obj_addr(NULL);
2849       region_ptr->set_completed();
2850       return;
2851     }
2852 
2853     HeapWord* const end_addr = sd.region_align_down(closure.source());
2854     if (sd.region_align_down(old_src_addr) != end_addr) {
2855       // The partial object was copied from more than one source region.
2856       decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2857 
2858       // Move to the next source region, possibly switching spaces as well.  All
2859       // args except end_addr may be modified.
2860       src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2861                                        end_addr);
2862     }
2863   }
2864 
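  // Main loop: copy live objects from the current source region into the
  // destination region, switching to new source regions (and source spaces)
  // as each is exhausted, until the destination region has been filled.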
2865   do {
2866     HeapWord* const cur_addr = closure.source();
2867     HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
2868                                     src_space_top);
2869     IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
2870 
2871     if (status == ParMarkBitMap::incomplete) {
2872       // The last obj that starts in the source region does not end in the
2873       // region.
2874       assert(closure.source() < end_addr, "sanity");
2875       HeapWord* const obj_beg = closure.source();
2876       HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
2877                                        src_space_top);
2878       HeapWord* const obj_end = bitmap->find_obj_end(obj_beg, range_end);
2879       if (obj_end < range_end) {
2880         // The end was found; the entire object will fit.
2881         status = closure.do_addr(obj_beg, bitmap->obj_size(obj_beg, obj_end));
2882         assert(status != ParMarkBitMap::would_overflow, "sanity");
2883       } else {
2884         // The end was not found; the object will not fit.
2885         assert(range_end < src_space_top, "obj cannot cross space boundary");
2886         status = ParMarkBitMap::would_overflow;
2887       }
2888     }
2889 
2890     if (status == ParMarkBitMap::would_overflow) {
2891       // The last object did not fit.  Record that its interior oop updates
2892       // were deferred, then copy enough of the object to fill the region.
2893       region_ptr->set_deferred_obj_addr(closure.destination());
2894       status = closure.copy_until_full(); // copies from closure.source()
2895 
2896       decrement_destination_counts(cm, src_space_id, src_region_idx,
2897                                    closure.source());
2898       region_ptr->set_completed();
2899       return;
2900     }
2901 
2902     if (status == ParMarkBitMap::full) {
2903       decrement_destination_counts(cm, src_space_id, src_region_idx,
2904                                    closure.source());
2905       region_ptr->set_deferred_obj_addr(NULL);
2906       region_ptr->set_completed();
2907       return;
2908     }
2909 
2910     decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
2911 
2912     // Move to the next source region, possibly switching spaces as well.  All
2913     // args except end_addr may be modified.
2914     src_region_idx = next_src_region(closure, src_space_id, src_space_top,
2915                                      end_addr);
2916   } while (true);
2917 }
2918 
2919 void PSParallelCompact::fill_blocks(size_t region_idx)
2920 {
2921   // Fill in the block table elements for the specified region.  Each block
2922   // table element holds the number of live words in the region that are to the
2923   // left of the first object that starts in the block.  Thus only blocks in
2924   // which an object starts need to be filled.
2925   //
2926   // The algorithm scans the section of the bitmap that corresponds to the
2927   // region, keeping a running total of the live words.  When an object start is
2928   // found, if it's the first to start in the block that contains it, the
2929   // current total is written to the block table element.
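  // A worked example with hypothetical sizes (BlockSize = 128 words, a partial
  // object occupying the region's first 40 words): the block containing the
  // first object to start in the region records offset 40; if the next object
  // to start falls in a later block and 100 more live words precede it, that
  // block records 140.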
2930   const size_t Log2BlockSize = ParallelCompactData::Log2BlockSize;
2931   const size_t Log2RegionSize = ParallelCompactData::Log2RegionSize;
2932   const size_t RegionSize = ParallelCompactData::RegionSize;
2933 
2934   ParallelCompactData& sd = summary_data();
2935   const size_t partial_obj_size = sd.region(region_idx)->partial_obj_size();
2936   if (partial_obj_size >= RegionSize) {
2937     return; // No objects start in this region.
2938   }
2939 
2940   // Ensure the first loop iteration decides that the block has changed.
2941   size_t cur_block = sd.block_count();
2942 
2943   const ParMarkBitMap* const bitmap = mark_bitmap();
2944 
2945   const size_t Log2BitsPerBlock = Log2BlockSize - LogMinObjAlignment;
2946   assert((size_t)1 << Log2BitsPerBlock ==
2947          bitmap->words_to_bits(ParallelCompactData::BlockSize), "sanity");
2948 
2949   size_t beg_bit = bitmap->words_to_bits(region_idx << Log2RegionSize);
2950   const size_t range_end = beg_bit + bitmap->words_to_bits(RegionSize);
2951   size_t live_bits = bitmap->words_to_bits(partial_obj_size);
2952   beg_bit = bitmap->find_obj_beg(beg_bit + live_bits, range_end);
2953   while (beg_bit < range_end) {
2954     const size_t new_block = beg_bit >> Log2BitsPerBlock;
2955     if (new_block != cur_block) {
2956       cur_block = new_block;
2957       sd.block(cur_block)->set_offset(bitmap->bits_to_words(live_bits));
2958     }
2959 
2960     const size_t end_bit = bitmap->find_obj_end(beg_bit, range_end);
2961     if (end_bit < range_end - 1) {
2962       live_bits += end_bit - beg_bit + 1;
2963       beg_bit = bitmap->find_obj_beg(end_bit + 1, range_end);
2964     } else {
2965       return;
2966     }
2967   }
2968 }
2969 
2970 void
2971 PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
2972   const MutableSpace* sp = space(space_id);
2973   if (sp->is_empty()) {
2974     return;
2975   }
2976 
2977   ParallelCompactData& sd = PSParallelCompact::summary_data();
2978   ParMarkBitMap* const bitmap = mark_bitmap();
2979   HeapWord* const dp_addr = dense_prefix(space_id);
2980   HeapWord* beg_addr = sp->bottom();
2981   HeapWord* end_addr = sp->top();
2982 
2983   assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
2984 
2985   const size_t beg_region = sd.addr_to_region_idx(beg_addr);
2986   const size_t dp_region = sd.addr_to_region_idx(dp_addr);
2987   if (beg_region < dp_region) {
2988     update_and_deadwood_in_dense_prefix(cm, space_id, beg_region, dp_region);
2989   }
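  // The dense prefix has been updated in place above; the remainder of the
  // space is moved and updated below.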
2990 
2991   // The destination of the first live object that starts in the region is one
2992   // past the end of the partial object entering the region (if any).
2993   HeapWord* const dest_addr = sd.partial_obj_end(dp_region);
2994   HeapWord* const new_top = _space_info[space_id].new_top();
2995   assert(new_top >= dest_addr, "bad new_top value");
2996   const size_t words = pointer_delta(new_top, dest_addr);
2997 
2998   if (words > 0) {
2999     ObjectStartArray* start_array = _space_info[space_id].start_array();
3000     MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
3001 
3002     ParMarkBitMap::IterationStatus status;
3003     status = bitmap->iterate(&closure, dest_addr, end_addr);
3004     assert(status == ParMarkBitMap::full, "iteration not complete");
3005     assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
3006            "live objects skipped because closure is full");
3007   }
3008 }
3009 
3010 jlong PSParallelCompact::millis_since_last_gc() {
3011   // We need a monotonically non-decreasing time in ms, but os::javaTimeMillis()
3012   // does not guarantee monotonicity; derive the value from os::javaTimeNanos(), which does.
3013   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
3014   jlong ret_val = now - _time_of_last_gc;
3015   // XXX See note in genCollectedHeap::millis_since_last_gc().
3016   if (ret_val < 0) {
3017     NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
3018     return 0;
3019   }
3020   return ret_val;
3021 }
3022 
3023 void PSParallelCompact::reset_millis_since_last_gc() {
3024   // We need a monotonically non-decreasing time in ms, but os::javaTimeMillis()
3025   // does not guarantee monotonicity; derive the value from os::javaTimeNanos(), which does.
3026   _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
3027 }
3028 
3029 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
3030 {
3031   if (source() != destination()) {
3032     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3033     Copy::aligned_conjoint_words(source(), destination(), words_remaining());
3034   }
3035   update_state(words_remaining());
3036   assert(is_full(), "sanity");
3037   return ParMarkBitMap::full;
3038 }
3039 
3040 void MoveAndUpdateClosure::copy_partial_obj()
3041 {
3042   size_t words = words_remaining();
3043 
3044   HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
3045   HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
3046   if (end_addr < range_end) {
3047     words = bitmap()->obj_size(source(), end_addr);
3048   }
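  // At this point words is either the remaining size of the partial object
  // (its end was found in range) or everything that fits in the destination.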
3049 
3050   // This test is necessary; if omitted, the pointer updates to a partial object
3051   // that crosses the dense prefix boundary could be overwritten.
3052   if (source() != destination()) {
3053     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3054     Copy::aligned_conjoint_words(source(), destination(), words);
3055   }
3056   update_state(words);
3057 }
3058 
3059 void InstanceKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3060   PSParallelCompact::AdjustPointerClosure closure(cm);
3061   oop_oop_iterate_oop_maps<true>(obj, &closure);
3062 }
3063 
3064 void InstanceMirrorKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3065   InstanceKlass::oop_pc_update_pointers(obj, cm);
3066 
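  // The mirror (a java.lang.Class instance) also embeds the static oop fields
  // of the class it represents; update those as well.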
3067   PSParallelCompact::AdjustPointerClosure closure(cm);
3068   oop_oop_iterate_statics<true>(obj, &closure);
3069 }
3070 
3071 void InstanceClassLoaderKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3072   InstanceKlass::oop_pc_update_pointers(obj, cm);
3073 }
3074 
3075 #ifdef ASSERT
3076 template <class T> static void trace_reference_gc(const char *s, oop obj,
3077                                                   T* referent_addr,
3078                                                   T* next_addr,
3079                                                   T* discovered_addr) {
3080   log_develop_trace(gc, ref)("%s obj " PTR_FORMAT, s, p2i(obj));
3081   log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
3082                              p2i(referent_addr), referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
3083   log_develop_trace(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
3084                              p2i(next_addr), next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
3085   log_develop_trace(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
3086                              p2i(discovered_addr), discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
3087 }
3088 #endif
3089 
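// Explicitly adjust the referent, next and discovered fields of a
// java.lang.ref.Reference, using the narrowOop or oop flavor that matches the
// heap's compressed-oops setting.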
3090 template <class T>
3091 static void oop_pc_update_pointers_specialized(oop obj, ParCompactionManager* cm) {
3092   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
3093   PSParallelCompact::adjust_pointer(referent_addr, cm);
3094   T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
3095   PSParallelCompact::adjust_pointer(next_addr, cm);
3096   T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
3097   PSParallelCompact::adjust_pointer(discovered_addr, cm);
3098   debug_only(trace_reference_gc("InstanceRefKlass::oop_update_ptrs", obj,
3099                                 referent_addr, next_addr, discovered_addr);)
3100 }
3101 
3102 void InstanceRefKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3103   InstanceKlass::oop_pc_update_pointers(obj, cm);
3104 
3105   if (UseCompressedOops) {
3106     oop_pc_update_pointers_specialized<narrowOop>(obj, cm);
3107   } else {
3108     oop_pc_update_pointers_specialized<oop>(obj, cm);
3109   }
3110 }
3111 
3112 void ObjArrayKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
3113   assert(obj->is_objArray(), "obj must be obj array");
3114   PSParallelCompact::AdjustPointerClosure closure(cm);
3115   oop_oop_iterate_elements<true>(objArrayOop(obj), &closure);
3116 }
3117 
3118 void TypeArrayKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
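  // Type arrays contain no oops, so there is nothing to update.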
3119   assert(obj->is_typeArray(), "must be a type array");
3120 }
3121 
3122 ParMarkBitMapClosure::IterationStatus
3123 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3124   assert(destination() != NULL, "sanity");
3125   assert(bitmap()->obj_size(addr) == words, "bad size");
3126 
3127   _source = addr;
3128   assert(PSParallelCompact::summary_data().calc_new_pointer(source(), compaction_manager()) ==
3129          destination(), "wrong destination");
3130 
3131   if (words > words_remaining()) {
3132     return ParMarkBitMap::would_overflow;
3133   }
3134 
3135   // The start_array must be updated even if the object is not moving.
3136   if (_start_array != NULL) {
3137     _start_array->allocate_block(destination());
3138   }
3139 
3140   if (destination() != source()) {
3141     DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3142     Copy::aligned_conjoint_words(source(), destination(), words);
3143   }
3144 
3145   oop moved_oop = (oop) destination();
3146   compaction_manager()->update_contents(moved_oop);
3147   assert(moved_oop->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop));
3148 
3149   update_state(words);
3150   assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
3151   return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
3152 }
3153 
3154 UpdateOnlyClosure::UpdateOnlyClosure(ParMarkBitMap* mbm,
3155                                      ParCompactionManager* cm,
3156                                      PSParallelCompact::SpaceId space_id) :
3157   ParMarkBitMapClosure(mbm, cm),
3158   _space_id(space_id),
3159   _start_array(PSParallelCompact::start_array(space_id))
3160 {
3161 }
3162 
3163 // Updates the references in the object to their new values.
3164 ParMarkBitMapClosure::IterationStatus
3165 UpdateOnlyClosure::do_addr(HeapWord* addr, size_t words) {
3166   do_addr(addr);
3167   return ParMarkBitMap::incomplete;
3168 }
3169 
3170 ParMarkBitMapClosure::IterationStatus
3171 FillClosure::do_addr(HeapWord* addr, size_t size) {
3172   CollectedHeap::fill_with_objects(addr, size);
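  // fill_with_objects() has overwritten the dead range with filler objects;
  // walk them and record each object start in the start array.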
3173   HeapWord* const end = addr + size;
3174   do {
3175     _start_array->allocate_block(addr);
3176     addr += oop(addr)->size();
3177   } while (addr < end);
3178   return ParMarkBitMap::incomplete;
3179 }