/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/fill.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_SERIALGC
#include "gc/serial/defNewGeneration.hpp"
#endif

HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTable::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card. Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done. That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}
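
// Illustrative note: under ObjHeadPreciseArray precision, a dirty card whose
// last word falls inside a plain (non-array) object forces the scan limit
// out to the end of that object, roughly:
//
//   card:               [.......dirty.......]
//   objects:   |---A---|--------C (non-array)---------|
//                                                     ^ top extended here
//
// objArray stores are card-marked exactly (and typeArrays contain no oops),
// so arrays never force this extension.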

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()). To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance, pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right end back so that we do not scan what
// we (or another worker thread) may already have scanned
// or may be planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTable::ObjHeadPreciseArray ||
         _precision == CardTable::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTable::ObjHeadPreciseArray ||
         _last_bottom == NULL || top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTable::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTable::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  _min_done = bottom;
}
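
// Illustrative note: with ObjHeadPreciseArray precision the caller hands
// regions to do_MemRegion() in decreasing address order (see the
// "Not decreasing" assert above), and _min_done records the lowest address
// already processed. A later, lower region is therefore clipped at
// _min_done instead of rescanning the overlap:
//
//   already scanned:                  [_min_done ...........)
//   incoming mr:        [bottom ................. top)
//   actually walked:    [bottom ... _min_done)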

DirtyCardToOopClosure* Space::new_dcto_cl(OopIterateClosure* cl,
                                          CardTable::PrecisionStyle precision,
                                          HeapWord* boundary,
                                          bool parallel) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTable::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card. Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void FilteringDCTOC::walk_mem_region(MemRegion mr,
                                     HeapWord* bottom,
                                     HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate_size(cl, mr);                      \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
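
// Illustrative note: the two expansions below give the compiler the static
// type of the closure at each oop_iterate call site; for FilteringClosure
// this permits static binding (and possible inlining) instead of virtual
// dispatch through OopIterateClosure.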
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopIterateClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(OopIterateClosure* cl,
                             CardTable::PrecisionStyle precision,
                             HeapWord* boundary,
                             bool parallel) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}
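
// Illustrative note: the ordering in set_end() above matters for readers
// racing with expansion: the block offset table is grown before the new
// end() is published, so any thread that observes the larger space can
// never index past the table's covered range.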

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
#endif // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->young_gen();
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark_raw();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // We need to update the offset table so that the beginnings of objects can be
  // found during scavenge. Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold) {
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  }
  return compact_top;
}
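
// Illustrative note: forward() encodes each live object's destination
// directly in its mark word via forward_to(). An object that will not move
// keeps a freshly initialized default mark instead, so later compaction
// phases can recognize it by forwardee() == NULL and skip the copy.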

#if INCLUDE_SERIALGC

void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  scan_and_adjust_pointers(this);
}

void CompactibleSpace::compact() {
  scan_and_compact(this);
}

#endif // INCLUDE_SERIALGC

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
            (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(bottom()), p2i(end()));
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(bottom()), p2i(top()), p2i(end()));
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
               INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
}

void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oopDesc::verify(oop(p));
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(OopIterateClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

void ContiguousSpace::oop_iterate(OopIterateClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate_size(blk);
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  object_iterate_from(bottom(), blk);
}

// For a ContiguousSpace, object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void ContiguousSpace::object_iterate_from(HeapWord* mark, ObjectClosure* blk) {
  while (mark < top()) {
    blk->do_object(oop(mark));
    mark += oop(mark)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p; // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}
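
// Illustrative note: a caller of object_iterate_careful() above can
// distinguish a complete walk (NULL) from an abort: a non-NULL result is
// the address of the object on which the closure returned size 0, giving a
// well-defined resumption point below concurrent_iteration_safe_limit().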

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oopDesc::is_oop(oop(last)), PTR_FORMAT " should be an object start", p2i(last));
    return last;
  }
}

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         "p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
         p2i(p), p2i(current_top));
  assert(p == current_top || oopDesc::is_oop(oop(p)),
         "p (" PTR_FORMAT ") is not a block start - "
         "current_top: " PTR_FORMAT ", is_oop: %s",
         p2i(p), p2i(current_top), BOOL_TO_STR(oopDesc::is_oop(oop(p))));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
      // result can be one of two values:
      //  the old top value: the exchange succeeded
      //  otherwise: the new value of the top is returned.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* end_value = end();

  HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end_value, SurvivorAlignmentInBytes);
  if (obj == NULL) {
    return NULL;
  }

  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(::is_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
           "checking alignment");
    return obj;
  } else {
    set_top(obj);
    return NULL;
  }
}

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size);
}
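
// Illustrative note: a hypothetical caller of the lock-free path below
// would retry or fall back on NULL, e.g.:
//
//   HeapWord* mem = space->par_allocate(word_size);
//   if (mem == NULL) {
//     // take Heap_lock (or another slow path) and try allocate(word_size)
//   }
//
// par_allocate_impl()'s CAS either installs new_top (returning obj, i.e.
// success) or returns the top some other thread installed first, and the
// loop retries against that fresh value.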

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size);
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary type array that fills the free space, leaving
  // roughly 1/factor of it unallocated when factor > 0.
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // If the space is full, there is nothing to do.
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark_raw(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == Fill::min_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark_raw(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}

HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find it using the
    // block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oopDesc::verify(oop(p));
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}


size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}