#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)mutableSpace.cpp	1.22 07/05/05 17:05:35 JVM"
#endif
/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
25 * 26 */ 27 28 # include "incls/_precompiled.incl" 29 # include "incls/_mutableSpace.cpp.incl" 30 31 MutableSpace::MutableSpace(): ImmutableSpace(), _top(NULL) { 32 _mangler = new MutableSpaceMangler(this); 33 } 34 35 MutableSpace::~MutableSpace() { 36 delete _mangler; 37 } 38 39 void MutableSpace::initialize(MemRegion mr, 40 bool clear_space, 41 bool mangle_space) { 42 HeapWord* bottom = mr.start(); 43 HeapWord* end = mr.end(); 44 45 assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end), 46 "invalid space boundaries"); 47 set_bottom(bottom); 48 set_end(end); 49 50 if (clear_space) { 51 clear(mangle_space); 52 } 53 } 54 55 void MutableSpace::clear(bool mangle_space) { 56 set_top(bottom()); 57 if (ZapUnusedHeapArea && mangle_space) { 58 mangle_unused_area(); 59 } 60 } 61 62 #ifndef PRODUCT 63 void MutableSpace::check_mangled_unused_area(HeapWord* limit) { 64 mangler()->check_mangled_unused_area(limit); 65 } 66 67 void MutableSpace::check_mangled_unused_area_complete() { 68 mangler()->check_mangled_unused_area_complete(); 69 } 70 71 // Mangle only the unused space that has not previously 72 // been mangled and that has not been allocated since being 73 // mangled. 74 void MutableSpace::mangle_unused_area() { 75 mangler()->mangle_unused_area(); 76 } 77 78 void MutableSpace::mangle_unused_area_complete() { 79 mangler()->mangle_unused_area_complete(); 80 } 81 82 void MutableSpace::mangle_region(MemRegion mr) { 83 SpaceMangler::mangle_region(mr); 84 } 85 86 void MutableSpace::set_top_for_allocations(HeapWord* v) { 87 mangler()->set_top_for_allocations(v); 88 } 89 90 void MutableSpace::set_top_for_allocations() { 91 mangler()->set_top_for_allocations(top()); 92 } 93 #endif 94 95 // This version requires locking. 
// Bump-pointer allocation of `size` heap words.  This version requires
// locking: the caller must hold the Heap_lock, or be the VM thread at a
// safepoint, so a plain read-modify-write of top is safe.
// Returns the start of the allocated chunk, or NULL if there is not
// enough room between top and end.
HeapWord* MutableSpace::allocate(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top),
           "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free: top is advanced with a compare-and-swap,
// retrying until the CAS succeeds or the space is exhausted.
// Returns the start of the allocated chunk, or NULL if it does not fit.
HeapWord* MutableSpace::cas_allocate(size_t size) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two:
      //   the old top value: the exchange succeeded
      //   otherwise: the new value of the top is returned.
      if (result != obj) {
        continue; // another thread beat us to the allocation, try again
      }
      assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top),
             "checking alignment");
      return obj;
    } else {
      return NULL;
    }
  } while (true);
}

// Try to deallocate previous allocation. Returns true upon success.
// Succeeds only if top is still exactly obj + size, i.e. nothing else has
// been allocated (or deallocated) since `obj` was handed out; the single
// CAS then moves top back down to obj.
bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
  HeapWord* expected_top = obj + size;
  return (HeapWord*)Atomic::cmpxchg_ptr(obj, top_addr(), expected_top) == expected_top;
}

// Apply `cl` to every oop (object reference field) in every object in the
// space, walking objects from bottom to top; oop_iterate on each object
// returns its size in heap words, which advances the cursor.
void MutableSpace::oop_iterate(OopClosure* cl) {
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call objects iterate, but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(cl);
  }
}

// Apply `cl` to every object in the space, from bottom to top.
// NOTE(review): top() is re-read each iteration here, unlike oop_iterate
// above which snapshots it — presumably both run while the space is not
// being mutated, so the difference is benign; confirm with callers.
void MutableSpace::object_iterate(ObjectClosure* cl) {
  HeapWord* p = bottom();
  while (p < top()) {
    cl->do_object(oop(p));
    p += oop(p)->size();
  }
}

// One-line summary: capacity in K and percent used.
// NOTE(review): divides by capacity_in_bytes() — assumes the space has
// been initialized to a non-empty region before printing.
void MutableSpace::print_short() const { print_short_on(tty); }
void MutableSpace::print_short_on( outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %d%% used", capacity_in_bytes() / K,
            (int) ((double) used_in_bytes() * 100 / capacity_in_bytes()));
}

// Summary line plus the [bottom, top, end) boundary addresses.
void MutableSpace::print() const { print_on(tty); }
void MutableSpace::print_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT "," INTPTR_FORMAT "," INTPTR_FORMAT ")",
               bottom(), top(), end());
}

// Walk all objects in [bottom, top), verifying each, and check that the
// object sizes tile the used region exactly (the walk must land on top).
void MutableSpace::verify(bool allow_dirty) {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
}