/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/java.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card.  Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done.  That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()).  To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right-end back some so we do not scan what
// we (or another worker thread) may already have scanned
// or are planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}

DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card.  Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void Filtering_DCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }

}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
void ContiguousSpace::mangle_region(MemRegion mr) {
  // Although this method uses SpaceMangler::mangle_region(), which
  // is not specific to a space, when the ContiguousSpace version
  // is called it is always with regard to a space, so this
  // bounds checking is appropriate.
  MemRegion space_mr(bottom(), end());
  assert(space_mr.contains(mr), "Mangling outside space");
  SpaceMangler::mangle_region(mr);
}
#endif  // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}


bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    CollectedHeap::fill_with_object(q, deadlength);
    oop(q)->set_mark(oop(q)->mark()->set_marked());
    assert((int) deadlength == oop(q)->size(), "bad filler object size");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}

#define block_is_always_obj(q) true
#define obj_size(q) oop(q)->size()
#define adjust_obj_size(s) s

void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
}

// Faster object search.
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
}

void Space::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  // First check to see if there is any work to be done.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  // Otherwise...
  HeapWord* q = bottom();
  HeapWord* t = end();

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    if (oop(q)->is_gc_marked()) {
      // q is alive

      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();

      debug_only(prev_q = q);

      q += size;
    } else {
      // q is not a live object.  But we're not in a compactible space,
      // so we don't have live ranges.
      debug_only(prev_q = q);
      q += block_size(q);
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }
  assert(q == t, "just checking");
}

void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
}

void CompactibleSpace::compact() {
  SCAN_AND_COMPACT(obj_size);
}

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
            (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), end());
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), end());
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
               INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), _offsets.threshold(), end());
}

void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(ExtendedOopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert (block_is_obj(p), "The address should point to an object");
  return true;
}

#if INCLUDE_ALL_GCS
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                             \
  void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
    HeapWord* obj_addr = mr.start();                                         \
    HeapWord* t = mr.end();                                                  \
    while (obj_addr < t) {                                                   \
      assert(oop(obj_addr)->is_oop(), "Should be an oop");                   \
      obj_addr += oop(obj_addr)->oop_iterate(blk);                           \
    }                                                                        \
  }

ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // INCLUDE_ALL_GCS

void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(blk);
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}

// For a ContiguousSpace, object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord * limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}

#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)  \
                                                                          \
void ContiguousSpace::                                                    \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {            \
  HeapWord* t;                                                            \
  HeapWord* p = saved_mark_word();                                        \
  assert(p != NULL, "expected saved mark");                               \
                                                                          \
  const intx interval = PrefetchScanIntervalInBytes;                      \
  do {                                                                    \
    t = top();                                                            \
    while (p < t) {                                                       \
      Prefetch::write(p, interval);                                       \
      debug_only(HeapWord* prev = p);                                     \
      oop m = oop(p);                                                     \
      p += m->oop_iterate(blk);                                           \
    }                                                                     \
  } while (t < top());                                                    \
                                                                          \
  set_saved_mark_word(p);                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p, bottom(), end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(),
           err_msg(PTR_FORMAT " should be an object start", last));
    return last;
  }
}

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p, bottom(), end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         err_msg("p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
                 p, current_top));
  assert(p == current_top || oop(p)->is_oop(),
         err_msg("p (" PTR_FORMAT ") is not a block start - "
                 "current_top: " PTR_FORMAT ", is_oop: %s",
                 p, current_top, BOOL_TO_STR(oop(p)->is_oop())));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                HeapWord* const end_value) {
  // In G1 there are places where a GC worker can allocate into a
  // region using this serial allocation code without being prone to a
  // race with other GC workers (we ensure that no other GC worker can
  // access the same region at the same time). So the assert below is
  // too strong in the case of G1.
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          (Thread::current()->is_VM_thread() || UseG1GC)),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
                                                    HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //   the old top value: the exchange succeeded
      //   otherwise: the new value of the top is returned.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size, end());
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, end());
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // allocate temporary type array decreasing free size with factor 'factor'
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= (size_t)align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}

void EdenSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  set_soft_end(end());
}

// Requires locking.
HeapWord* EdenSpace::allocate(size_t size) {
  return allocate_impl(size, soft_end());
}

// Lock-free.
HeapWord* EdenSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, soft_end());
}

HeapWord* ConcEdenSpace::par_allocate(size_t size)
{
  do {
    // The invariant is that top() should be read before end(), because
    // top() can't be greater than end(): if an update of _soft_end
    // occurred between 'end_val = end();' and 'top_val = top();', top()
    // could also grow up to the new end() and the condition
    // 'top_val > end_val' would be true. To ensure this loading order,
    // OrderAccess::loadload() is required after the top() read.
    HeapWord* obj = top();
    OrderAccess::loadload();
    if (pointer_delta(*soft_end_addr(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //   the old top value: the exchange succeeded
      //   otherwise: the new value of the top is returned.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}


HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find it using the
    // block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}


size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}