/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Forward declarations
class CompactibleFreeListSpace;

// A PromotedObject is a view over the header word of an object promoted
// by CMS.  Promoted objects are chained into a singly-linked list through
// this word; because objects are word-aligned, the low bits of the link
// are free to carry per-object flags:
//   promoted_mask  (0x3) - object is on the promoted list
//   displaced_mark (0x4) - object's original mark word was spooled away
// NOTE(review): this overlays live object words (see the (FreeChunk*)this
// casts below) — do not add data members.
class PromotedObject VALUE_OBJ_CLASS_SPEC {
 private:
  enum {
    promoted_mask  = right_n_bits(2),    // i.e. 0x3
    displaced_mark = nth_bit(2),         // i.e. 0x4
    next_mask      = ~(right_n_bits(3))  // i.e. ~(0x7)
  };

  // Below, we want _narrow_next in the "higher" 32 bit slot,
  // whose position will depend on endian-ness of the platform.
  // This is so that there is no interference with the
  // cms_free_bit occupying bit position 7 (lsb == 0)
  // when we are using compressed oops; see FreeChunk::isFree().
  // We cannot move the cms_free_bit down because currently
  // biased locking code assumes that age bits are contiguous
  // with the lock bits. Even if that assumption were relaxed,
  // the least position we could move this bit to would be
  // to bit position 3, which would require 16 byte alignment.
  typedef struct {
#ifdef VM_LITTLE_ENDIAN
    LP64_ONLY(narrowOop _pad;)
    narrowOop _narrow_next;
#else
    narrowOop _narrow_next;
    LP64_ONLY(narrowOop _pad;)
#endif
  } Data;

  // One word viewed two ways: as a full-width next pointer plus flag
  // bits (_next), or — with compressed oops — as a narrowOop link held
  // in the upper 32-bit slot (_data._narrow_next).
  union {
    intptr_t _next;
    Data     _data;
  };
 public:
  // Return the next object on the promoted list (NULL at the tail):
  // either decode the narrow oop, or mask off the low flag bits.
  inline PromotedObject* next() const {
    assert(!((FreeChunk*)this)->isFree(), "Error");
    PromotedObject* res;
    if (UseCompressedOops) {
      // The next pointer is a compressed oop stored in the top 32 bits
      res = (PromotedObject*)oopDesc::decode_heap_oop(_data._narrow_next);
    } else {
      res = (PromotedObject*)(_next & next_mask);
    }
    assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Not an oop?");
    return res;
  }
  // Link x after this object.  x must be sufficiently aligned that its
  // address has no bits in common with the flag-bit positions.
  inline void setNext(PromotedObject* x) {
    assert(((intptr_t)x & ~next_mask) == 0, "Conflict in bit usage, "
                                            "or insufficient alignment of objects");
    if (UseCompressedOops) {
      assert(_data._narrow_next == 0, "Overwrite?");
      _data._narrow_next = oopDesc::encode_heap_oop(oop(x));
    } else {
      // OR (rather than assign) so flag bits already set are preserved.
      _next |= (intptr_t)x;
    }
    assert(!((FreeChunk*)this)->isFree(), "Error");
  }
  // Mark this object as being on the promoted list.
  inline void setPromotedMark() {
    _next |= promoted_mask;
    assert(!((FreeChunk*)this)->isFree(), "Error");
  }
  inline bool hasPromotedMark() const {
    assert(!((FreeChunk*)this)->isFree(), "Error");
    return (_next & promoted_mask) == promoted_mask;
  }
  // Record that this object's original mark word has been displaced
  // (spooled into a SpoolBlock below).
  inline void setDisplacedMark() {
    _next |= displaced_mark;
    assert(!((FreeChunk*)this)->isFree(), "Error");
  }
  inline bool hasDisplacedMark() const {
    assert(!((FreeChunk*)this)->isFree(), "Error");
    return (_next & displaced_mark) != 0;
  }
  // Clear both the link and all flag bits in one store.
  inline void clearNext() {
    _next = 0;
    assert(!((FreeChunk*)this)->isFree(), "Error");
  }
  debug_only(void *next_addr() { return (void *) &_next; })
};

// A SpoolBlock is a buffer used to "spool" (save) the displaced mark
// words of promoted objects whose header word was overwritten by the
// PromotedObject link above.  It inherits the FreeChunk layout so that a
// drained block can be handed back to the CMS free lists.
class SpoolBlock: public FreeChunk {
  friend class PromotionInfo;
 protected:
  SpoolBlock*  nextSpoolBlock;
  size_t       bufferSize;        // number of usable words in this block
  markOop*     displacedHdr;      // the displaced headers start here

  // Note about bufferSize: it denotes the number of entries available plus 1;
  // legal indices range from 1 through BufferSize - 1.  See the verification
  // code verify() that counts the number of displaced headers spooled.
  size_t computeBufferSize() {
    // Words in the chunk beyond this header, expressed as markOop slots.
    return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markOop);
  }

 public:
  void init() {
    bufferSize = computeBufferSize();
    // Headers are stored in-line in the block, starting at this field's
    // own address (entry 0 is the field itself; hence indices from 1).
    displacedHdr = (markOop*)&displacedHdr;
    nextSpoolBlock = NULL;
  }

  void print_on(outputStream* st) const;
  void print() const { print_on(gclog_or_tty); }
};

// PromotionInfo is the per-space bookkeeping for objects promoted during
// a concurrent collection: the list of PromotedObjects, and the chain of
// SpoolBlocks holding their displaced mark words.
class PromotionInfo VALUE_OBJ_CLASS_SPEC {
  bool            _tracking;      // set if tracking
  CompactibleFreeListSpace* _space; // the space to which this belongs
  PromotedObject* _promoHead;     // head of list of promoted objects
  PromotedObject* _promoTail;     // tail of list of promoted objects
  SpoolBlock*     _spoolHead;     // first spooling block
  SpoolBlock*     _spoolTail;     // last  non-full spooling block or null
  SpoolBlock*     _splice_point;  // when _spoolTail is null, holds list tail
  SpoolBlock*     _spareSpool;    // free spool buffer
  size_t          _firstIndex;    // first active index in
                                  // first spooling block (_spoolHead)
  size_t          _nextIndex;     // last active index + 1 in last
                                  // spooling block (_spoolTail)
 private:
  // ensure that spooling space exists; return true if there is spooling space
  bool ensure_spooling_space_work();

 public:
  // NOTE(review): _splice_point is deliberately(?) not initialized here —
  // presumably it is only read once spooling has begun; confirm against
  // the .cpp before relying on it.
  PromotionInfo() :
    _tracking(0), _space(NULL),
    _promoHead(NULL), _promoTail(NULL),
    _spoolHead(NULL), _spoolTail(NULL),
    _spareSpool(NULL), _firstIndex(1),
    _nextIndex(1) {}

  // True iff no objects are currently on the promoted list.
  bool noPromotions() const {
    assert(_promoHead != NULL || _promoTail == NULL, "list inconsistency");
    return _promoHead == NULL;
  }
  void startTrackingPromotions();
  void stopTrackingPromotions(uint worker_id = 0);
  bool tracking() const          { return _tracking; }
  void track(PromotedObject* trackOop);      // keep track of a promoted oop
  // The following variant must be used when trackOop is not fully
  // initialized and has a NULL klass:
  void track(PromotedObject* trackOop, klassOop klassOfOop); // keep track of a promoted oop
  void setSpace(CompactibleFreeListSpace* sp) { _space = sp; }
  CompactibleFreeListSpace* space() const     { return _space; }
  markOop nextDisplacedHeader(); // get next header & forward spool pointer
  void    saveDisplacedHeader(markOop hdr);
                                 // save header and forward spool

  inline size_t refillSize() const;

  SpoolBlock* getSpoolBlock();   // return a free spooling block
  // True iff the tail spool block still has room for another header
  // (recall: legal indices run 1 .. bufferSize-1).
  inline bool has_spooling_space() {
    return _spoolTail != NULL && _spoolTail->bufferSize > _nextIndex;
  }
  // ensure that spooling space exists
  bool ensure_spooling_space() {
    return has_spooling_space() || ensure_spooling_space_work();
  }
#define PROMOTED_OOPS_ITERATE_DECL(OopClosureType, nv_suffix)  \
  void promoted_oops_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(PROMOTED_OOPS_ITERATE_DECL)
#undef PROMOTED_OOPS_ITERATE_DECL
  // Apply cl to all oops in promoted objects (virtual-dispatch variant
  // of the macro-generated family above).
  void promoted_oops_iterate(OopsInGenClosure* cl) {
    promoted_oops_iterate_v(cl);
  }
  void verify()  const;
  // Drop all promoted-list and spooling state.
  // NOTE(review): the indices are reset to 0 here but initialized to 1 in
  // the constructor, even though legal spool indices start at 1 (see
  // SpoolBlock) — verify this asymmetry is intentional before changing.
  void reset() {
    _promoHead = NULL;
    _promoTail = NULL;
    _spoolHead = NULL;
    _spoolTail = NULL;
    _spareSpool = NULL;
    _firstIndex = 0;
    _nextIndex = 0;

  }

  void print_on(outputStream* st) const;
  void print_statistics(uint worker_id) const;
};