409
410 // If the first object's end n is at the card boundary, start refining
411 // with the corresponding card (the value of the entry will basically be
412 // set to 0). If the object crosses the boundary, start from the next card.
413 size_t n_index = _array->index_for(n);
414 size_t next_index = _array->index_for(n) + !_array->is_card_boundary(n);
415 // Calculate a consistent next boundary. If "n" is not at the boundary
416 // already, step to the boundary.
417 HeapWord* next_boundary = _array->address_for_index(n_index) +
418 (n_index == next_index ? 0 : N_words);
419 assert(next_boundary <= _array->_end,
420 err_msg("next_boundary is beyond the end of the covered region "
421 " next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT,
422 next_boundary, _array->_end));
423 if (addr >= gsp()->top()) return gsp()->top();
424 while (next_boundary < addr) {
425 while (n <= next_boundary) {
426 q = n;
427 oop obj = oop(q);
428 if (obj->klass_or_null() == NULL) return q;
429 n += obj->size();
430 }
431 assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
432 // [q, n) is the block that crosses the boundary.
433 alloc_block_work2(&next_boundary, &next_index, q, n);
434 }
435 return forward_to_block_containing_addr_const(q, n, addr);
436 }
437
438 HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
439 assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
440
441 assert(_bottom <= addr && addr < _end,
442 "addr must be covered by this Array");
443 // Must read this exactly once because it can be modified by parallel
444 // allocation.
445 HeapWord* ub = _unallocated_block;
446 if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
447 assert(ub < _end, "tautology (see above)");
448 return ub;
449 }
|
409
410 // If the first object's end n is at the card boundary, start refining
411 // with the corresponding card (the value of the entry will basically be
412 // set to 0). If the object crosses the boundary, start from the next card.
413 size_t n_index = _array->index_for(n);
414 size_t next_index = _array->index_for(n) + !_array->is_card_boundary(n);
415 // Calculate a consistent next boundary. If "n" is not at the boundary
416 // already, step to the boundary.
417 HeapWord* next_boundary = _array->address_for_index(n_index) +
418 (n_index == next_index ? 0 : N_words);
419 assert(next_boundary <= _array->_end,
420 err_msg("next_boundary is beyond the end of the covered region "
421 " next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT,
422 next_boundary, _array->_end));
423 if (addr >= gsp()->top()) return gsp()->top();
424 while (next_boundary < addr) {
425 while (n <= next_boundary) {
426 q = n;
427 oop obj = oop(q);
428 if (obj->klass_or_null() == NULL) return q;
429 n += block_size(q);
430 }
431 assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
432 // [q, n) is the block that crosses the boundary.
433 alloc_block_work2(&next_boundary, &next_index, q, n);
434 }
435 return forward_to_block_containing_addr_const(q, n, addr);
436 }
437
438 HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
439 assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
440
441 assert(_bottom <= addr && addr < _end,
442 "addr must be covered by this Array");
443 // Must read this exactly once because it can be modified by parallel
444 // allocation.
445 HeapWord* ub = _unallocated_block;
446 if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
447 assert(ub < _end, "tautology (see above)");
448 return ub;
449 }
|