1 /* 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.hpp"
#include "memory/space.hpp"

// Returns the start of the block containing "addr", or NULL if "addr"
// lies outside the covered range [_bottom, _end).
inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
  if (addr >= _bottom && addr < _end) {
    return block_start_unsafe(addr);
  } else {
    return NULL;
  }
}

// Const variant of block_start(): same bounds check, delegating to the
// const unsafe lookup.
inline HeapWord*
G1BlockOffsetTable::block_start_const(const void* addr) const {
  if (addr >= _bottom && addr < _end) {
    return block_start_unsafe_const(addr);
  } else {
    return NULL;
  }
}

// Maps heap address "p" to the index of its entry in the shared offset
// array: the byte distance from the start of the reserved region,
// shifted down by LogN (log2 of the bytes covered per entry).
inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
  char* pc = (char*)p;
  assert(pc >= (char*)_reserved.start() &&
         pc < (char*)_reserved.end(),
         "p not in range.");
  size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
  size_t result = delta >> LogN;
  assert(result < _vs.committed_size(), "bad index from address");
  return result;
}

// Inverse of index_for(): returns the heap address of the start of the
// region covered by offset-array entry "index".
inline HeapWord*
G1BlockOffsetSharedArray::address_for_index(size_t index) const {
  assert(index < _vs.committed_size(), "bad index");
  HeapWord* result = _reserved.start() + (index << LogN_words);
  assert(result >= _reserved.start() && result < _reserved.end(),
         "bad address from index");
  return result;
}

// Returns the start of a block that begins at or before "addr", found
// by reading the offset-array entry for addr's card and walking the
// table backwards (see the loop that follows).  If "has_max_index" is
// set, the starting index is clamped to "max_index", for queries where
// "addr" may be past the last valid entry.
inline HeapWord*
G1BlockOffsetArray::block_at_or_preceding(const void* addr,
                                          bool has_max_index,
                                          size_t max_index) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
  size_t index = _array->index_for(addr);
  // We must make sure that the offset table entry we use is valid. If
  // "addr" is past the end, start at the last known one and go forward.
  if (has_max_index) {
    index = MIN2(index, max_index);
  }
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);  // Extend u_char to uint.
  // Entries >= N_words are back-skip codes, not direct word offsets:
  // follow them backwards until a direct offset (< N_words) is found.
  while (offset >= N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= _sp->bottom(), "Went below bottom!");
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  q -= offset;
  return q;
}

// Starting from block start "q" (whose next block starts at "n"), walks
// forward block by block until the block containing "addr" is found.
// If an object's header is not yet installed (klass_or_null() == NULL),
// the walk stops conservatively and returns the current block start.
inline HeapWord*
G1BlockOffsetArray::
forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                       const void* addr) const {
  if (csp() != NULL) {
    // A contiguous covered space is available: clip queries beyond its
    // top, and size each block directly from the object at its start.
    if (addr >= csp()->top()) return csp()->top();
    while (n <= addr) {
      q = n;
      oop obj = oop(q);
      if (obj->klass_or_null() == NULL) return q;
      n += obj->size();
    }
  } else {
    // No contiguous space: ask the space itself for each block's size.
    while (n <= addr) {
      q = n;
      oop obj = oop(q);
      if (obj->klass_or_null() == NULL) return q;
      n += _sp->block_size(q);
    }
  }
  assert(q <= n, "wrong order for q and addr");
  assert(addr < n, "wrong order for addr and n");
  return q;
}

// Non-const variant: given block start "q", advance to the block
// containing "addr", taking the slow path only when "q"'s own block
// does not already cover "addr".
inline HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
                                                     const void* addr) {
  // An object whose header is not yet installed terminates the search.
  if (oop(q)->klass_or_null() == NULL) return q;
  HeapWord* n = q + _sp->block_size(q);
  // In the normal case, where the query "addr" is a card boundary, and the
  // offset table chunks are the same size as cards, the block starting at
  // "q" will contain addr, so the test below will fail, and we'll fall
  // through quickly.
  if (n <= addr) {
    q = forward_to_block_containing_addr_slow(q, n, addr);
  }
  assert(q <= addr, "wrong order for current and arg");
  return q;
}

//////////////////////////////////////////////////////////////////////////
// BlockOffsetArrayNonContigSpace inlines
//////////////////////////////////////////////////////////////////////////
// Records that the range [blk_start, blk_end) has been freed; pulls
// _unallocated_block back to blk_start when the freed range contains or
// abuts it.
inline void G1BlockOffsetArray::freed(HeapWord* blk_start, HeapWord* blk_end) {
  // Verify that the BOT shows [blk_start, blk_end) to be one block.
  verify_single_block(blk_start, blk_end);
  // adjust _unallocated_block upward or downward
  // as appropriate
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(_unallocated_block <= _end,
           "Inconsistent value for _unallocated_block");
    if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) {
      // CMS-specific note: a block abutting _unallocated_block to
      // its left is being freed, a new block is being added or
      // we are resetting following a compaction
      _unallocated_block = blk_start;
    }
  }
}

// Convenience overload: frees "size" words starting at "blk".
inline void G1BlockOffsetArray::freed(HeapWord* blk, size_t size) {
  freed(blk, blk + size);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP