/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_SERIALGC
#include "gc/serial/defNewGeneration.hpp"
#endif

HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTable::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card. Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done. That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
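  // 3. The loop below therefore advances by block_size(), not by the
  //    object's own size, and applies the closure only to blocks that
  //    are objects allocated before the most recent save_marks().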
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()). To be done post-beta. XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right end back some so we do not scan what
// we (or another worker thread) may already have scanned
// or may be planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last   = mr.last();
  HeapWord* top    = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTable::ObjHeadPreciseArray ||
         _precision == CardTable::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTable::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTable::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTable::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
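  // (For example, with ObjHeadPreciseArray precision: if an earlier call
  // already processed [C, D) and recorded _min_done == C, and this call's
  // rightward extension of "mr" would reach past C, then top was pulled
  // back to C above, so the overlap is not scanned twice. extended_mr is
  // empty whenever the incoming region was already wholly covered.)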
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}

DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                          CardTable::PrecisionStyle precision,
                                          HeapWord* boundary,
                                          bool parallel) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTable::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card. Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void FilteringDCTOC::walk_mem_region(MemRegion mr,
                                     HeapWord* bottom,
                                     HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate_size(cl, mr);                      \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
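// Instantiate the walker once for the plain ExtendedOopClosure and once
// for the FilteringClosure wrapper, so that each oop_iterate call site
// in the macro body binds to the statically known closure type.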
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                             CardTable::PrecisionStyle precision,
                             HeapWord* boundary,
                             bool parallel) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
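// (Both variants below simply delegate to the GenSpaceMangler owned by
// this space; like the rest of this block, they exist only in
// non-product builds.)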
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
#endif // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->young_gen();
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark_raw();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // We need to update the offset table so that the beginnings of objects can be
  // found during scavenge. Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}

#if INCLUDE_SERIALGC

void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

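  // scan_and_adjust_pointers() (defined in space.inline.hpp) visits each
  // live object in the space and updates its interior oops to refer to
  // the forwarded locations computed during the forwarding pass above.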
  scan_and_adjust_pointers(this);
}

void CompactibleSpace::compact() {
  scan_and_compact(this);
}

#endif // INCLUDE_SERIALGC

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
            (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(bottom()), p2i(end()));
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(bottom()), p2i(top()), p2i(end()));
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
               INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
}

void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(ExtendedOopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate_size(blk);
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  object_iterate_from(bottom(), blk);
}

// For a ContiguousSpace object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void ContiguousSpace::object_iterate_from(HeapWord* mark, ObjectClosure* blk) {
  while (mark < top()) {
    blk->do_object(oop(mark));
    mark += oop(mark)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}

// Very general, slow implementation.
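// Starts at bottom() and walks forward one object at a time until it
// passes p, so the cost is linear in the number of preceding objects.
// Spaces backed by a block offset table (e.g. OffsetTableContigSpace)
// override this with a much cheaper table lookup.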
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur  = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oopDesc::is_oop(oop(last)), PTR_FORMAT " should be an object start", p2i(last));
    return last;
  }
}

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         "p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
         p2i(p), p2i(current_top));
  assert(p == current_top || oopDesc::is_oop(oop(p)),
         "p (" PTR_FORMAT ") is not a block start - "
         "current_top: " PTR_FORMAT ", is_oop: %s",
         p2i(p), p2i(current_top), BOOL_TO_STR(oopDesc::is_oop(oop(p))));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
      // result is one of two things:
      //   obj (the old top value): the exchange succeeded and top is now
      //   new_top;
      //   anything else: another thread installed its own top first, so
      //   we loop around and retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* end_value = end();

  HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end_value, SurvivorAlignmentInBytes);
  if (obj == NULL) {
    return NULL;
  }

  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(::is_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
           "checking alignment");
    return obj;
  } else {
    set_top(obj);
    return NULL;
  }
}

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size);
}

// Lock-free.
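// Multiple mutator threads may allocate here concurrently; losers of the
// compare-and-swap in par_allocate_impl() above simply re-read top and
// retry, so no lock is taken on this path.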
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size);
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary filler array, leaving roughly 1/factor of the
  // current free space unallocated (factor == 0 fills all of it).
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // If the space is full, there is nothing to do.
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark_raw(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark_raw(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}

HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find it using the
    // block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}


size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}