1 /* 2 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"

//////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////

// Reserve backing storage for the offset table sized from the whole
// "reserved" heap region, register that storage with native memory
// tracking as GC memory, map it into _vs, and commit enough of it
// (via resize()) to cover init_word_size heap words.
BlockOffsetSharedArray::BlockOffsetSharedArray(MemRegion reserved,
                                               size_t init_word_size):
  _reserved(reserved), _end(NULL)
{
  size_t size = compute_size(reserved.word_size());
  ReservedSpace rs(size);
  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }

  // Record the reserved range with NMT, tagged as GC metadata.
  NMTTrackOp op(NMTTrackOp::TypeOp);
  op.execute_op((address)rs.base(), 0, mtGC);

  if (!_vs.initialize(rs, 0)) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  // The table entries start at the low end of the virtual space.
  _offset_array = (u_char*)_vs.low_boundary();
  resize(init_word_size);
  if (TraceBlockOffsetTable) {
    gclog_or_tty->print_cr("BlockOffsetSharedArray::BlockOffsetSharedArray: ");
    gclog_or_tty->print_cr("  "
                  "  rs.base(): " INTPTR_FORMAT
                  "  rs.size(): " INTPTR_FORMAT
                  "  rs end(): "  INTPTR_FORMAT,
                  rs.base(), rs.size(), rs.base() + rs.size());
    gclog_or_tty->print_cr("  "
                  "  _vs.low_boundary(): " INTPTR_FORMAT
                  "  _vs.high_boundary(): " INTPTR_FORMAT,
                  _vs.low_boundary(),
                  _vs.high_boundary());
  }
}

// Commit (or uncommit) table storage so the table covers exactly
// new_word_size heap words; new_word_size must not exceed the reserved
// size.  Growth is page-aligned up, shrinkage page-aligned down; a
// shrink that rounds down to zero pages is a no-op.
void BlockOffsetSharedArray::resize(size_t new_word_size) {
  assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
  size_t new_size = compute_size(new_word_size);
  size_t old_size = _vs.committed_size();
  size_t delta;
  char* high = _vs.high();
  _end = _reserved.start() + new_word_size;
  if (new_size > old_size) {
    delta = ReservedSpace::page_align_size_up(new_size - old_size);
    assert(delta > 0, "just checking");
    if (!_vs.expand_by(delta)) {
      // Do better than this for Merlin
      vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion");
    }
    assert(_vs.high() == high + delta, "invalid expansion");
  } else {
    delta = ReservedSpace::page_align_size_down(old_size - new_size);
    if (delta == 0) return;
    _vs.shrink_by(delta);
    assert(_vs.high() == high - delta, "invalid expansion");
  }
}

// Returns true iff p is aligned on a card boundary (the low LogN_words
// bits of its word offset from the start of the reserved region are 0).
bool BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
  assert(p >= _reserved.start(), "just checking");
  size_t delta = pointer_delta(p, _reserved.start());
  return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
}


//////////////////////////////////////////////////////////////////////
// BlockOffsetArray
//////////////////////////////////////////////////////////////////////

// Construct a block offset array covering mr, backed by the shared
// table "array".  When init_to_zero_ is false the table entries are
// eagerly initialized so that every card points back to mr.start().
BlockOffsetArray::BlockOffsetArray(BlockOffsetSharedArray* array,
                                   MemRegion mr, bool init_to_zero_) :
  BlockOffsetTable(mr.start(), mr.end()),
  _array(array)
{
  assert(_bottom <= _end, "arguments out of order");
  set_init_to_zero(init_to_zero_);
  if (!init_to_zero_) {
    // initialize cards to point back to mr.start()
    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
    _array->set_offset_array(0, 0);  // set first card to 0
  }
}


// The arguments follow the normal convention of denoting
// a right-open interval: [start, end)
void
BlockOffsetArray::
set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing) {

  check_reducing_assertion(reducing);
  if (start >= end) {
    // The start address is equal to the end address (or to
    // the right of the end address) so there are no cards
    // that need to be updated.
    return;
  }

  // Write the backskip value for each region.
  //
  //    offset
  //    card             2nd                       3rd
  //     | +- 1st        |                         |
  //     v v             v                         v
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    11              19                        75
  //      12
  //
  //    offset card is the card that points to the start of an object
  //      x - offset value of offset card
  //    1st - start of first logarithmic region
  //      0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
  //    2nd - start of second logarithmic region
  //      1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
  //    3rd - start of third logarithmic region
  //      2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
  //
  //    integer below the block offset entry is an example of
  //    the index of the entry
  //
  //    Given an address,
  //      Find the index for the address
  //      Find the block offset table entry
  //      Convert the entry to a back slide
  //        (e.g., with today's, offset = 0x81 =>
  //          back slip = 2**(3*(0x81 - N_words)) = 2**3) = 8
  //      Move back N (e.g., 8) entries and repeat with the
  //        value of the new entry
  //
  size_t start_card = _array->index_for(start);
  size_t end_card = _array->index_for(end-1);
  assert(start ==_array->address_for_index(start_card), "Precondition");
  assert(end ==_array->address_for_index(end_card)+N_words, "Precondition");
  set_remainder_to_point_to_start_incl(start_card, end_card, reducing); // closed interval
}


// Unlike the normal convention in this code, the argument here denotes
// a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start()
// above.
void
BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card, bool reducing) {

  check_reducing_assertion(reducing);
  if (start_card > end_card) {
    return;
  }
  assert(start_card > _array->index_for(_bottom), "Cannot be first card");
  assert(_array->offset_array(start_card-1) <= N_words,
         "Offset card has an unexpected value");
  size_t start_card_for_region = start_card;
  u_char offset = max_jubyte;
  // Fill successive "power regions": region i covers cards reachable
  // by a back-skip of power_to_cards_back(i+1), and its cards all get
  // the logarithmic entry value N_words + i.
  for (int i = 0; i < N_powers; i++) {
    // -1 so that the card with the actual offset is counted.  Another -1
    // so that the reach ends in this region and not at the start
    // of the next.
    size_t reach = start_card - 1 + (power_to_cards_back(i+1) - 1);
    offset = N_words + i;
    if (reach >= end_card) {
      _array->set_offset_array(start_card_for_region, end_card, offset, reducing);
      start_card_for_region = reach + 1;
      break;
    }
    _array->set_offset_array(start_card_for_region, reach, offset, reducing);
    start_card_for_region = reach + 1;
  }
  assert(start_card_for_region > end_card, "Sanity check");
  DEBUG_ONLY(check_all_cards(start_card, end_card);)
}

// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
void BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {

  if (end_card < start_card) {
    return;
  }
  guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
  u_char last_entry = N_words;
  for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
    u_char entry = _array->offset_array(c);
    guarantee(entry >= last_entry, "Monotonicity");
    if (c - start_card > power_to_cards_back(1)) {
      guarantee(entry > N_words, "Should be in logarithmic region");
    }
    size_t backskip = entry_to_cards_back(entry);
    size_t landing_card = c - backskip;
    guarantee(landing_card >= (start_card - 1), "Inv");
    if (landing_card >= start_card) {
      guarantee(_array->offset_array(landing_card) <= entry, "Monotonicity");
    } else {
      guarantee(landing_card == (start_card - 1), "Tautology");
      // Note that N_words is the maximum offset value
      guarantee(_array->offset_array(landing_card) <= N_words, "Offset value");
    }
    last_entry = entry;  // remember for monotonicity test
  }
}


void
BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  single_block(blk_start, blk_end);
}

// Action_mark - update the BOT for the block [blk_start, blk_end).
//               Current typical use is for splitting a block.
// Action_single - update the BOT for an allocation.
// Action_verify - BOT verification.
void
BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                    HeapWord* blk_end,
                                    Action action, bool reducing) {
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  // This is optimized to make the test fast, assuming we only rarely
  // cross boundaries.
  uintptr_t end_ui = (uintptr_t)(blk_end - 1);
  uintptr_t start_ui = (uintptr_t)blk_start;
  // Calculate the last card boundary preceding end of blk
  intptr_t boundary_before_end = (intptr_t)end_ui;
  clear_bits(boundary_before_end, right_n_bits(LogN));
  if (start_ui <= (uintptr_t)boundary_before_end) {
    // blk starts at or crosses a boundary
    // Calculate index of card on which blk begins
    size_t    start_index = _array->index_for(blk_start);
    // Index of card on which blk ends
    size_t    end_index   = _array->index_for(blk_end - 1);
    // Start address of card on which blk begins
    HeapWord* boundary    = _array->address_for_index(start_index);
    assert(boundary <= blk_start, "blk should start at or after boundary");
    if (blk_start != boundary) {
      // blk starts strictly after boundary
      // adjust card boundary and start_index forward to next card
      boundary += N_words;
      start_index++;
    }
    assert(start_index <= end_index, "monotonicity of index_for()");
    assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
    switch (action) {
      case Action_mark: {
        if (init_to_zero()) {
          _array->set_offset_array(start_index, boundary, blk_start, reducing);
          break;
        } // Else fall through to the next case
      }
      case Action_single: {
        _array->set_offset_array(start_index, boundary, blk_start, reducing);
        // We have finished marking the "offset card". We need to now
        // mark the subsequent cards that this blk spans.
        if (start_index < end_index) {
          HeapWord* rem_st  = _array->address_for_index(start_index) + N_words;
          HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
          set_remainder_to_point_to_start(rem_st, rem_end, reducing);
        }
        break;
      }
      case Action_check: {
        _array->check_offset_array(start_index, boundary, blk_start);
        // We have finished checking the "offset card". We need to now
        // check the subsequent cards that this blk spans.
        check_all_cards(start_index + 1, end_index);
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }
}

// The range [blk_start, blk_end) represents a single contiguous block
// of storage; modify the block offset table to represent this
// information; Right-open interval: [blk_start, blk_end)
// NOTE: this method does _not_ adjust _unallocated_block.
void
BlockOffsetArray::single_block(HeapWord* blk_start,
                               HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_single);
}

void BlockOffsetArray::verify() const {
  // For each entry in the block offset table, verify that
  // the entry correctly finds the start of an object at the
  // first address covered by the block or to the left of that
  // first address.

  size_t next_index = 1;
  size_t last_index = last_active_index();

  // Use for debugging.  Initialize to NULL to distinguish the
  // first iteration through the while loop.
  HeapWord* last_p = NULL;
  HeapWord* last_start = NULL;
  oop last_o = NULL;

  while (next_index <= last_index) {
    // Use an address past the start of the address for
    // the entry.
    HeapWord* p = _array->address_for_index(next_index) + 1;
    if (p >= _end) {
      // That's all of the allocated block table.
      return;
    }
    // block_start() asserts that start <= p.
    HeapWord* start = block_start(p);
    // First check if the start is an allocated block and only
    // then if it is a valid object.
    oop o = oop(start);
    assert(!Universe::is_fully_initialized() ||
           _sp->is_free_block(start) ||
           o->is_oop_or_null(), "Bad object was found");
    next_index++;
    last_p = p;
    last_start = start;
    last_o = o;
  }
}

//////////////////////////////////////////////////////////////////////
// BlockOffsetArrayNonContigSpace
//////////////////////////////////////////////////////////////////////

// The block [blk_start, blk_end) has been allocated;
// adjust the block offset table to represent this information;
// NOTE: Clients of BlockOffsetArrayNonContigSpace: consider using
// the somewhat more lightweight split_block() or
// (when init_to_zero()) mark_block() wherever possible.
// right-open interval: [blk_start, blk_end)
void
BlockOffsetArrayNonContigSpace::alloc_block(HeapWord* blk_start,
                                            HeapWord* blk_end) {
  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  single_block(blk_start, blk_end);
  allocated(blk_start, blk_end);
}

// Adjust BOT to show that a previously whole block has been split
// into two.  We verify the BOT for the first part (prefix) and
// update the BOT for the second part (suffix).
//    blk is the start of the block
//    blk_size is the size of the original block
//    left_blk_size is the size of the first part of the split
void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk,
                                                 size_t blk_size,
                                                 size_t left_blk_size) {
  // Verify that the BOT shows [blk, blk + blk_size) to be one block.
  verify_single_block(blk, blk_size);
  // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
  // is one single block.
  assert(blk_size > 0, "Should be positive");
  assert(left_blk_size > 0, "Should be positive");
  assert(left_blk_size < blk_size, "Not a split");

  // Start addresses of prefix block and suffix block.
  HeapWord* pref_addr = blk;
  HeapWord* suff_addr = blk + left_blk_size;
  HeapWord* end_addr  = blk + blk_size;

  // Indices for starts of prefix block and suffix block.
  size_t pref_index = _array->index_for(pref_addr);
  if (_array->address_for_index(pref_index) != pref_addr) {
    // pref_addr does not begin pref_index
    pref_index++;
  }

  size_t suff_index = _array->index_for(suff_addr);
  if (_array->address_for_index(suff_index) != suff_addr) {
    // suff_addr does not begin suff_index
    suff_index++;
  }

  // Definition: A block B, denoted [B_start, B_end) __starts__
  //     a card C, denoted [C_start, C_end), where C_start and C_end
  //     are the heap addresses that card C covers, iff
  //     B_start <= C_start < B_end.
  //
  //     We say that a card C "is started by" a block B, iff
  //     B "starts" C.
  //
  //     Note that the cardinality of the set of cards {C}
  //     started by a block B can be 0, 1, or more.
  //
  // Below, pref_index and suff_index are, respectively, the
  // first (least) card indices that the prefix and suffix of
  // the split start; end_index is one more than the index of
  // the last (greatest) card that blk starts.
  size_t end_index  = _array->index_for(end_addr - 1) + 1;

  // Calculate the # cards that the prefix and suffix affect.
  size_t num_pref_cards = suff_index - pref_index;

  size_t num_suff_cards = end_index  - suff_index;
  // Change the cards that need changing
  if (num_suff_cards > 0) {
    HeapWord* boundary = _array->address_for_index(suff_index);
    // Set the offset card for suffix block
    _array->set_offset_array(suff_index, boundary, suff_addr, true /* reducing */);
    // Change any further cards that need changing in the suffix
    if (num_pref_cards > 0) {
      if (num_pref_cards >= num_suff_cards) {
        // Unilaterally fix all of the suffix cards: closed card
        // index interval in args below.
        set_remainder_to_point_to_start_incl(suff_index + 1, end_index - 1, true /* reducing */);
      } else {
        // Unilaterally fix the first (num_pref_cards - 1) following
        // the "offset card" in the suffix block.
        set_remainder_to_point_to_start_incl(suff_index + 1,
          suff_index + num_pref_cards - 1, true /* reducing */);
        // Fix the appropriate cards in the remainder of the
        // suffix block -- these are the last num_pref_cards
        // cards in each power block of the "new" range plumbed
        // from suff_addr.
        bool more = true;
        uint i = 1;
        // Find the power block containing the first card to fix.
        while (more && (i < N_powers)) {
          size_t back_by = power_to_cards_back(i);
          size_t right_index = suff_index + back_by - 1;
          size_t left_index  = right_index - num_pref_cards + 1;
          if (right_index >= end_index - 1) { // last iteration
            right_index = end_index - 1;
            more = false;
          }
          if (back_by > num_pref_cards) {
            // Fill in the remainder of this "power block", if it
            // is non-null.
            if (left_index <= right_index) {
              _array->set_offset_array(left_index, right_index,
                N_words + i - 1, true /* reducing */);
            } else {
              more = false; // we are done
            }
            i++;
            break;
          }
          i++;
        }
        // Fix the tail of each remaining power block.
        while (more && (i < N_powers)) {
          size_t back_by = power_to_cards_back(i);
          size_t right_index = suff_index + back_by - 1;
          size_t left_index  = right_index - num_pref_cards + 1;
          if (right_index >= end_index - 1) { // last iteration
            right_index = end_index - 1;
            if (left_index > right_index) {
              break;
            }
            more  = false;
          }
          assert(left_index <= right_index, "Error");
          _array->set_offset_array(left_index, right_index, N_words + i - 1, true /* reducing */);
          i++;
        }
      }
    } // else no more cards to fix in suffix
  } // else nothing needs to be done
  // Verify that we did the right thing
  verify_single_block(pref_addr, left_blk_size);
  verify_single_block(suff_addr, blk_size - left_blk_size);
}


//
Mark the BOT such that if [blk_start, blk_end) straddles a card
// boundary, the card following the first such boundary is marked
// with the appropriate offset.
// NOTE: this method does _not_ adjust _unallocated_block or
// any cards subsequent to the first one.
void
BlockOffsetArrayNonContigSpace::mark_block(HeapWord* blk_start,
                                           HeapWord* blk_end, bool reducing) {
  do_block_internal(blk_start, blk_end, Action_mark, reducing);
}

// Find the start of the block containing addr by walking the offset
// table backwards (following logarithmic back-skip entries), then
// parsing forward through the blocks of the space.
HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe(
  const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table.
  size_t index = _array->index_for(addr);
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);    // Extend u_char to uint.
  while (offset >= N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= _sp->bottom(),
           err_msg("q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT,
                   q, _sp->bottom()));
    assert(q < _sp->end(),
           err_msg("q = " PTR_FORMAT " crossed above end = " PTR_FORMAT,
                   q, _sp->end()));
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  index--;
  q -= offset;
  assert(q >= _sp->bottom(),
         err_msg("q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT,
                 q, _sp->bottom()));
  assert(q < _sp->end(),
         err_msg("q = " PTR_FORMAT " crossed above end = " PTR_FORMAT,
                 q, _sp->end()));
  HeapWord* n = q;

  // Walk forward block by block until we pass addr; q then holds the
  // start of the block containing addr.
  while (n <= addr) {
    debug_only(HeapWord* last = q);   // for debugging
    q = n;
    n += _sp->block_size(n);
    assert(n > q,
           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT","
                   " while querying blk_start(" PTR_FORMAT ")"
                   " on _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
                   n, last, addr, _sp->bottom(), _sp->end()));
  }
  assert(q <= addr,
         err_msg("wrong order for current (" INTPTR_FORMAT ")" " <= arg (" INTPTR_FORMAT ")",
                 q, addr));
  assert(addr <= n,
         err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")",
                 addr, n));
  return q;
}

// Like block_start_unsafe(), but addr must be the start of a card and
// only the table is consulted -- no objects/blocks are parsed.
HeapWord* BlockOffsetArrayNonContigSpace::block_start_careful(
  const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table, but taking
  // care (cf block_start_unsafe() above) not to parse any objects/blocks
  // on the cards themselves.
  size_t index = _array->index_for(addr);
  assert(_array->address_for_index(index) == addr,
         "arg should be start of card");

  HeapWord* q = (HeapWord*)addr;
  uint offset;
  do {
    offset = _array->offset_array(index);
    if (offset < N_words) {
      q -= offset;
    } else {
      size_t n_cards_back = entry_to_cards_back(offset);
      q -= (n_cards_back * N_words);
      index -= n_cards_back;
    }
  } while (offset >= N_words);
  assert(q <= addr, "block start should be to left of arg");
  return q;
}

#ifndef PRODUCT
// Verification & debugging - ensure that the offset table reflects the fact
// that the block [blk_start, blk_end) or [blk, blk + size) is a
// single block of storage. NOTE: can't const this because of
// call to non-const do_block_internal() below.
620 void BlockOffsetArrayNonContigSpace::verify_single_block( 621 HeapWord* blk_start, HeapWord* blk_end) { 622 if (VerifyBlockOffsetArray) { 623 do_block_internal(blk_start, blk_end, Action_check); 624 } 625 } 626 627 void BlockOffsetArrayNonContigSpace::verify_single_block( 628 HeapWord* blk, size_t size) { 629 verify_single_block(blk, blk + size); 630 } 631 632 // Verify that the given block is before _unallocated_block 633 void BlockOffsetArrayNonContigSpace::verify_not_unallocated( 634 HeapWord* blk_start, HeapWord* blk_end) const { 635 if (BlockOffsetArrayUseUnallocatedBlock) { 636 assert(blk_start < blk_end, "Block inconsistency?"); 637 assert(blk_end <= _unallocated_block, "_unallocated_block problem"); 638 } 639 } 640 641 void BlockOffsetArrayNonContigSpace::verify_not_unallocated( 642 HeapWord* blk, size_t size) const { 643 verify_not_unallocated(blk, blk + size); 644 } 645 #endif // PRODUCT 646 647 size_t BlockOffsetArrayNonContigSpace::last_active_index() const { 648 if (_unallocated_block == _bottom) { 649 return 0; 650 } else { 651 return _array->index_for(_unallocated_block - 1); 652 } 653 } 654 655 ////////////////////////////////////////////////////////////////////// 656 // BlockOffsetArrayContigSpace 657 ////////////////////////////////////////////////////////////////////// 658 659 HeapWord* BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) const { 660 assert(_array->offset_array(0) == 0, "objects can't cross covered areas"); 661 662 // Otherwise, find the block start using the table. 663 assert(_bottom <= addr && addr < _end, 664 "addr must be covered by this Array"); 665 size_t index = _array->index_for(addr); 666 // We must make sure that the offset table entry we use is valid. If 667 // "addr" is past the end, start at the last known one and go forward. 668 index = MIN2(index, _next_offset_index-1); 669 HeapWord* q = _array->address_for_index(index); 670 671 uint offset = _array->offset_array(index); // Extend u_char to uint. 
672 while (offset > N_words) { 673 // The excess of the offset from N_words indicates a power of Base 674 // to go back by. 675 size_t n_cards_back = entry_to_cards_back(offset); 676 q -= (N_words * n_cards_back); 677 assert(q >= _sp->bottom(), "Went below bottom!"); 678 index -= n_cards_back; 679 offset = _array->offset_array(index); 680 } 681 while (offset == N_words) { 682 assert(q >= _sp->bottom(), "Went below bottom!"); 683 q -= N_words; 684 index--; 685 offset = _array->offset_array(index); 686 } 687 assert(offset < N_words, "offset too large"); 688 q -= offset; 689 HeapWord* n = q; 690 691 while (n <= addr) { 692 debug_only(HeapWord* last = q); // for debugging 693 q = n; 694 n += _sp->block_size(n); 695 } 696 assert(q <= addr, "wrong order for current and arg"); 697 assert(addr <= n, "wrong order for arg and next"); 698 return q; 699 } 700 701 // 702 // _next_offset_threshold 703 // | _next_offset_index 704 // v v 705 // +-------+-------+-------+-------+-------+ 706 // | i-1 | i | i+1 | i+2 | i+3 | 707 // +-------+-------+-------+-------+-------+ 708 // ( ^ ] 709 // block-start 710 // 711 712 void BlockOffsetArrayContigSpace::alloc_block_work(HeapWord* blk_start, 713 HeapWord* blk_end) { 714 assert(blk_start != NULL && blk_end > blk_start, 715 "phantom block"); 716 assert(blk_end > _next_offset_threshold, 717 "should be past threshold"); 718 assert(blk_start <= _next_offset_threshold, 719 "blk_start should be at or before threshold"); 720 assert(pointer_delta(_next_offset_threshold, blk_start) <= N_words, 721 "offset should be <= BlockOffsetSharedArray::N"); 722 assert(Universe::heap()->is_in_reserved(blk_start), 723 "reference must be into the heap"); 724 assert(Universe::heap()->is_in_reserved(blk_end-1), 725 "limit must be within the heap"); 726 assert(_next_offset_threshold == 727 _array->_reserved.start() + _next_offset_index*N_words, 728 "index must agree with threshold"); 729 730 debug_only(size_t orig_next_offset_index = _next_offset_index;) 731 732 
// Mark the card that holds the offset into the block. Note 733 // that _next_offset_index and _next_offset_threshold are not 734 // updated until the end of this method. 735 _array->set_offset_array(_next_offset_index, 736 _next_offset_threshold, 737 blk_start); 738 739 // We need to now mark the subsequent cards that this blk spans. 740 741 // Index of card on which blk ends. 742 size_t end_index = _array->index_for(blk_end - 1); 743 744 // Are there more cards left to be updated? 745 if (_next_offset_index + 1 <= end_index) { 746 HeapWord* rem_st = _array->address_for_index(_next_offset_index + 1); 747 // Calculate rem_end this way because end_index 748 // may be the last valid index in the covered region. 749 HeapWord* rem_end = _array->address_for_index(end_index) + N_words; 750 set_remainder_to_point_to_start(rem_st, rem_end); 751 } 752 753 // _next_offset_index and _next_offset_threshold updated here. 754 _next_offset_index = end_index + 1; 755 // Calculate _next_offset_threshold this way because end_index 756 // may be the last valid index in the covered region. 757 _next_offset_threshold = _array->address_for_index(end_index) + N_words; 758 assert(_next_offset_threshold >= blk_end, "Incorrect offset threshold"); 759 760 #ifdef ASSERT 761 // The offset can be 0 if the block starts on a boundary. That 762 // is checked by an assertion above. 
763 size_t start_index = _array->index_for(blk_start); 764 HeapWord* boundary = _array->address_for_index(start_index); 765 assert((_array->offset_array(orig_next_offset_index) == 0 && 766 blk_start == boundary) || 767 (_array->offset_array(orig_next_offset_index) > 0 && 768 _array->offset_array(orig_next_offset_index) <= N_words), 769 "offset array should have been set"); 770 for (size_t j = orig_next_offset_index + 1; j <= end_index; j++) { 771 assert(_array->offset_array(j) > 0 && 772 _array->offset_array(j) <= (u_char) (N_words+N_powers-1), 773 "offset array should have been set"); 774 } 775 #endif 776 } 777 778 HeapWord* BlockOffsetArrayContigSpace::initialize_threshold() { 779 assert(!Universe::heap()->is_in_reserved(_array->_offset_array), 780 "just checking"); 781 _next_offset_index = _array->index_for(_bottom); 782 _next_offset_index++; 783 _next_offset_threshold = 784 _array->address_for_index(_next_offset_index); 785 return _next_offset_threshold; 786 } 787 788 void BlockOffsetArrayContigSpace::zero_bottom_entry() { 789 assert(!Universe::heap()->is_in_reserved(_array->_offset_array), 790 "just checking"); 791 size_t bottom_index = _array->index_for(_bottom); 792 _array->set_offset_array(bottom_index, 0); 793 } 794 795 size_t BlockOffsetArrayContigSpace::last_active_index() const { 796 size_t result = _next_offset_index - 1; 797 return result >= 0 ? result : 0; 798 }