/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.hpp"
#include "memory/space.hpp"

inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
  if (addr >= _bottom && addr < _end) {
    return block_start_unsafe(addr);
  } else {
    return NULL;
  }
}

inline HeapWord*
G1BlockOffsetTable::block_start_const(const void* addr) const {
  if (addr >= _bottom && addr < _end) {
    return block_start_unsafe_const(addr);
  } else {
    return NULL;
  }
}

inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const {
  char* pc = (char*)p;
  assert(pc >= (char*)_reserved.start() &&
         pc <  (char*)_reserved.end(),
         err_msg("p (" PTR_FORMAT ") not in reserved [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p2i(p), p2i(_reserved.start()), p2i(_reserved.end())));
  size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
  size_t result = delta >> LogN;
  check_index(result, "bad index from address");
  return result;
}

inline HeapWord*
G1BlockOffsetSharedArray::address_for_index(size_t index) const {
  check_index(index, "index out of range");
  HeapWord* result = _reserved.start() + (index << LogN_words);
  assert(result >= _reserved.start() && result < _reserved.end(),
         err_msg("bad address from index result " PTR_FORMAT
                 " _reserved.start() " PTR_FORMAT " _reserved.end() "
                 PTR_FORMAT,
                 p2i(result), p2i(_reserved.start()), p2i(_reserved.end())));
  return result;
}

inline HeapWord*
G1BlockOffsetArray::block_at_or_preceding(const void* addr,
                                          bool has_max_index,
                                          size_t max_index) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
  size_t index = _array->index_for(addr);
  // We must make sure that the offset table entry we use is valid.  If
  // "addr" is past the end, start at the last known one and go forward.
  if (has_max_index) {
    index = MIN2(index, max_index);
  }
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);  // Extend u_char to uint.
  while (offset >= N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= _sp->bottom(), "Went below bottom!");
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  q -= offset;
  return q;
}

inline HeapWord*
G1BlockOffsetArray::
forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                       const void* addr) const {
  if (csp() != NULL) {
    if (addr >= csp()->top()) return csp()->top();
    while (n <= addr) {
      q = n;
      oop obj = oop(q);
      if (obj->klass_or_null() == NULL) return q;
      n += obj->size();
    }
  } else {
    while (n <= addr) {
      q = n;
      oop obj = oop(q);
      if (obj->klass_or_null() == NULL) return q;
      n += _sp->block_size(q);
    }
  }
  assert(q <= n, "wrong order for q and addr");
  assert(addr < n, "wrong order for addr and n");
  return q;
}

inline HeapWord*
G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q,
                                                     const void* addr) {
  if (oop(q)->klass_or_null() == NULL) return q;
  HeapWord* n = q + _sp->block_size(q);
  // In the normal case, where the query "addr" is a card boundary, and the
  // offset table chunks are the same size as cards, the block starting at
  // "q" will contain addr, so the test below will fail, and we'll fall
  // through quickly.
  if (n <= addr) {
    q = forward_to_block_containing_addr_slow(q, n, addr);
  }
  assert(q <= addr, "wrong order for current and arg");
  return q;
}

//////////////////////////////////////////////////////////////////////////
// BlockOffsetArrayNonContigSpace inlines
//////////////////////////////////////////////////////////////////////////
inline void G1BlockOffsetArray::freed(HeapWord* blk_start, HeapWord* blk_end) {
  // Verify that the BOT shows [blk_start, blk_end) to be one block.
  verify_single_block(blk_start, blk_end);
  // adjust _unallocated_block upward or downward
  // as appropriate
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(_unallocated_block <= _end,
           "Inconsistent value for _unallocated_block");
    if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) {
      // CMS-specific note: a block abutting _unallocated_block to
      // its left is being freed, a new block is being added or
      // we are resetting following a compaction
      _unallocated_block = blk_start;
    }
  }
}

inline void G1BlockOffsetArray::freed(HeapWord* blk, size_t size) {
  freed(blk, blk + size);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP