/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card. Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done. That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}
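
// An illustration of the extension above (the layout and the 512-byte card
// size are assumed here for concreteness, not taken from this file): a
// plain (non-array) object that starts on a dirty card may span several
// cards, and the store that dirtied the card may have landed anywhere
// inside it, so "top" is pushed out from the card boundary to the end of
// the object:
//
//   cards:  |  dirty  |  clean  |  clean  |
//   heap:   ...[ obj A ][========== obj B ==========]...
//                       ^top_obj                    ^top_obj + oop(top_obj)->size()
//
//   top (in):  end of the dirty card
//   top (out): end of obj B, so the whole object gets scanned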
void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()). To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right end back some so we do not scan what
// we (or another worker thread) have already scanned
// or are planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last   = mr.last();
  HeapWord* top    = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}
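
// A worked illustration of the trimming above (addresses are made up):
// suppose an earlier do_MemRegion() call walked [0x1000, 0x1400) and
// recorded _min_done = 0x1000. A later call for the dirty region
// [0x0c00, 0x1200) may first extend its right end via get_actual_top(),
// but is then clipped back to _min_done, so only [0x0c00, 0x1000) is
// walked and nothing is scanned twice. Since processing proceeds from
// higher to lower addresses (the "Not decreasing" assert above),
// _min_done always tracks the lowest address already done.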
DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card. Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void Filtering_DCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
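
// For reference, the FilteringClosure instantiation above expands to
// roughly:
//
//   void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,
//                                                      HeapWord* bottom,
//                                                      HeapWord* top,
//                                                      FilteringClosure* cl);
//
// Because the parameter has the concrete type FilteringClosure rather
// than ExtendedOopClosure, the oop_iterate() calls in the body can be
// statically bound to FilteringClosure's methods instead of being
// dispatched virtually; that is the point of the replication noted above.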
DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space) {
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}

void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}

void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}
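
// Debug-build note: "mangling" fills the unused portion of a space with a
// recognizable bad pattern (badHeapWord) so that reads of memory that was
// never properly allocated fail fast. The GenSpaceMangler also tracks how
// far it has already mangled, so the calls above avoid re-mangling (or
// re-checking) the same range.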
// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}

void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
#endif // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->young_gen();
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // We need to update the offset table so that the beginnings of objects can be
  // found during scavenge. Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold) {
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  }
  return compact_top;
}

bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    CollectedHeap::fill_with_object(q, deadlength);
    oop(q)->set_mark(oop(q)->mark()->set_marked());
    assert((int) deadlength == oop(q)->size(), "bad filler object size");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}

void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  scan_and_forward(this, cp);
}

void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;  // Nothing to do.
  }

  scan_and_adjust_pointers(this);
}

void CompactibleSpace::compact() {
  scan_and_compact(this);
}
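
// For orientation, the sliding mark-compact algorithm touches a
// CompactibleSpace in three passes:
//   1. prepare_for_compaction() / scan_and_forward(): compute each live
//      object's destination and record it as a forwarding pointer in the
//      object's mark word (see forward() above).
//   2. adjust_pointers() / scan_and_adjust_pointers(): rewrite every oop
//      field to point at the referent's forwardee.
//   3. compact() / scan_and_compact(): copy the object bodies down to
//      their destinations.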
void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
            (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), end());
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), end());
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
               INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), _offsets.threshold(), end());
}

void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(ExtendedOopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

#if INCLUDE_ALL_GCS
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                            \
  void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
    HeapWord* obj_addr = mr.start();                                        \
    HeapWord* t = mr.end();                                                 \
    while (obj_addr < t) {                                                  \
      assert(oop(obj_addr)->is_oop(), "Should be an oop");                  \
      obj_addr += oop(obj_addr)->oop_iterate(blk);                          \
    }                                                                       \
  }

ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // INCLUDE_ALL_GCS

void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(blk);
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}
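
// A minimal usage sketch for object_iterate(); the closure below is
// hypothetical, not part of this file:
//
//   class CountObjectsClosure : public ObjectClosure {
//     size_t _count;
//    public:
//     CountObjectsClosure() : _count(0) {}
//     void do_object(oop obj) { _count++; }
//     size_t count() const { return _count; }
//   };
//
//   CountObjectsClosure cl;
//   space->object_iterate(&cl);
//   // cl.count() is now the number of objects in [bottom(), top()).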
// For a ContiguousSpace object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL;  // all done
}

#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                         \
void ContiguousSpace::                                                   \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {           \
  HeapWord* t;                                                           \
  HeapWord* p = saved_mark_word();                                       \
  assert(p != NULL, "expected saved mark");                              \
                                                                         \
  const intx interval = PrefetchScanIntervalInBytes;                     \
  do {                                                                   \
    t = top();                                                           \
    while (p < t) {                                                      \
      Prefetch::write(p, interval);                                      \
      debug_only(HeapWord* prev = p);                                    \
      oop m = oop(p);                                                    \
      p += m->oop_iterate(blk);                                          \
    }                                                                    \
  } while (t < top());                                                   \
                                                                         \
  set_saved_mark_word(p);                                                \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p, bottom(), end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(),
           err_msg(PTR_FORMAT " should be an object start", last));
    return last;
  }
}

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p, bottom(), end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         err_msg("p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
                 p, current_top));
  assert(p == current_top || oop(p)->is_oop(),
         err_msg("p (" PTR_FORMAT ") is not a block start - "
                 "current_top: " PTR_FORMAT ", is_oop: %s",
                 p, current_top, BOOL_TO_STR(oop(p)->is_oop())));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // The result is one of two values:
      //   the old top value: the exchange succeeded;
      //   otherwise: the current value of top is returned and we retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
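
// A worked illustration of the CAS race above (addresses assumed): two
// threads both read top() == 0x1000 and try to allocate 0x40 HeapWords
// (0x200 bytes on a 64-bit VM):
//
//   T1: cmpxchg_ptr(0x1200, top_addr(), 0x1000) returns 0x1000 == obj;
//       T1 wins and owns [0x1000, 0x1200).
//   T2: cmpxchg_ptr(0x1200, top_addr(), 0x1000) returns 0x1200 != obj;
//       T2 loses, re-reads top() and retries from 0x1200.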
HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* end_value = end();

  HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end_value,
                                                          SurvivorAlignmentInBytes);
  if (obj == NULL) {
    return NULL;
  }

  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
           "checking alignment");
    return obj;
  } else {
    set_top(obj);
    return NULL;
  }
}

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size);
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size);
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary type array, decreasing the free size by a
  // factor of 'factor'.
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // If the space is full, there is nothing to do.
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= (size_t)align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}

HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}
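
// Context for the two methods above: the block offset table records, for
// each fixed-size chunk of the space, where the block overlapping the
// chunk's start begins, so that block_start() can map an arbitrary address
// (e.g. one derived from a dirty card) back to an object header without
// walking the whole space. "threshold" is the next chunk boundary not yet
// covered; whenever an allocation crosses it, cross_threshold() has
// alloc_block() fill in the entries the new block spans and returns the
// new threshold to watch for.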
OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find them using the
    // block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}

size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}