/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_SERIALGC
#include "gc/serial/defNewGeneration.hpp"
#endif

HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTable::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card.  Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done.  That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}
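// Iterate over the blocks in [bottom, top) that intersect the dirty region
// "mr", applying the wrapped closure to each block that is an object and
// was allocated before the save-marks point.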
void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()).  To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process.  Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more.  However, because we may already have
// scanned some of that extended region, we may need to
// trim its right end back some so we do not scan what
// we (or another worker thread) may already have scanned
// or are planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTable::ObjHeadPreciseArray ||
         _precision == CardTable::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTable::ObjHeadPreciseArray ||
         _last_bottom == NULL || top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTable::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTable::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");
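  // For example, if an earlier call (in ObjHeadPreciseArray mode) has
  // already scanned from _min_done upward, top was clipped to _min_done
  // above, so the walk below covers only the unscanned prefix
  // [bottom, _min_done).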
  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  _min_done = bottom;
}

DirtyCardToOopClosure* Space::new_dcto_cl(OopIterateClosure* cl,
                                          CardTable::PrecisionStyle precision,
                                          HeapWord* boundary,
                                          bool parallel) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTable::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card.  Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void FilteringDCTOC::walk_mem_region(MemRegion mr,
                                     HeapWord* bottom,
                                     HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate_size(cl, mr);                      \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
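// Each expansion below defines walk_mem_region_with_cl for one
// statically-typed closure; with the static type visible, the compiler
// can devirtualize (and potentially inline) the oop_iterate calls.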
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopIterateClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(OopIterateClosure* cl,
                             CardTable::PrecisionStyle precision,
                             HeapWord* boundary,
                             bool parallel) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
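// (Mangling overwrites the unused words with a recognizable bad-value
// pattern, so reads of supposedly-free memory stand out in debug runs.)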
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
#endif  // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->young_gen();
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark_raw();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}

#if INCLUDE_SERIALGC
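// Serial mark-compact visits this space in three passes:
// prepare_for_compaction computes forwarding addresses (via forward()
// above), adjust_pointers rewrites references to use those addresses,
// and compact actually moves the objects.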
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  scan_and_adjust_pointers(this);
}

void CompactibleSpace::compact() {
  scan_and_compact(this);
}

#endif // INCLUDE_SERIALGC

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
            (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(bottom()), p2i(end()));
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(bottom()), p2i(top()), p2i(end()));
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
               INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
}

void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oopDesc::verify(oop(p));
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(OopIterateClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

void ContiguousSpace::oop_iterate(OopIterateClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate_size(blk);
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  object_iterate_from(bottom(), blk);
}

void ContiguousSpace::object_iterate_from(HeapWord* mark, ObjectClosure* blk) {
  while (mark < top()) {
    blk->do_object(oop(mark));
    mark += oop(mark)->size();
  }
}

// Very general, slow implementation.
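// It scans object-by-object from bottom(), so it is linear in the number
// of preceding objects; subclasses backed by a block offset table
// (e.g. OffsetTableContigSpace) can answer much faster.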
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oopDesc::is_oop(oop(last)), PTR_FORMAT " should be an object start", p2i(last));
    return last;
  }
}

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         "p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
         p2i(p), p2i(current_top));
  assert(p == current_top || oopDesc::is_oop(oop(p)),
         "p (" PTR_FORMAT ") is not a block start - "
         "current_top: " PTR_FORMAT ", is_oop: %s",
         p2i(p), p2i(current_top), BOOL_TO_STR(oopDesc::is_oop(oop(p))));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
      // cmpxchg returns one of two values:
      //   the old top value (obj): the exchange succeeded;
      //   otherwise: another thread raced ahead, the current value of top
      //   is returned, and we loop to retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* end_value = end();

  HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end_value, SurvivorAlignmentInBytes);
  if (obj == NULL) {
    return NULL;
  }

  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(::is_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
           "checking alignment");
    return obj;
  } else {
    set_top(obj);
    return NULL;
  }
}

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size);
}

// Lock-free.
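// For example, two threads racing to allocate at the same 'obj' each
// compute their own new_top; only one cmpxchg succeeds, and the loser
// re-reads the newly published top and retries.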
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size);
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary type array, shrinking the remaining free size by
  // a factor of 'factor' (i.e. roughly 1/factor of the space is left free).
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // If the space is full, return.
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark_raw(markWord::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark_raw(markWord::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}

HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find the start of each
    // using the block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oopDesc::verify(oop(p));
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}


size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}