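// Lock-based parallel allocation path: takes _par_alloc_lock and forwards
// to the unsynchronized allocate().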
inline HeapWord* G1ContiguousSpace::par_allocate(size_t min_word_size,
                                                 size_t desired_word_size,
                                                 size_t* actual_size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(min_word_size, desired_word_size, actual_size);
}

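// Block-start lookups are delegated to this region's part of the
// block offset table (BOT).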
inline HeapWord* G1ContiguousSpace::block_start(const void* p) {
  return _bot_part.block_start(p);
}

inline HeapWord*
G1ContiguousSpace::block_start_const(const void* p) const {
  return _bot_part.block_start_const(p);
}

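// Returns whether obj is dead with respect to the previous marking bitmap
// and stores the size of the block starting at obj in *size. For dead
// blocks under class unloading the size must come from the bitmap, since
// the object's klass may itself have been unloaded.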
inline bool HeapRegion::is_obj_dead_with_size(const oop obj, G1CMBitMapRO* prev_bitmap, size_t* size) const {
  HeapWord* addr = (HeapWord*) obj;

  assert(addr < top(), "must be");
  assert(!is_closed_archive(),
         "Closed archive regions should not have references into other regions");
  assert(!is_humongous(), "Humongous objects not handled here");
  bool obj_is_dead = is_obj_dead(obj, prev_bitmap);

  if (ClassUnloadingWithConcurrentMark && obj_is_dead) {
    assert(!block_is_obj(addr), "must be");
    *size = block_size_using_bitmap(addr, prev_bitmap);
  } else {
    assert(block_is_obj(addr), "must be");
    *size = obj->size();
  }
  return obj_is_dead;
}

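// Returns whether the block starting at p is a parseable object. The only
// address outside this region we can be asked about is the bottom of a
// humongous object that continues into this region.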
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  if (!this->is_in(p)) {
    assert(is_continues_humongous(), "This case can only happen for humongous regions");
    return (p == humongous_start_region()->bottom());
  }
  if (ClassUnloadingWithConcurrentMark) {
    // With class unloading, a dead object's klass may already be gone, so
    // only blocks holding live objects are parseable.
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

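// Computes the size of a dead block as the distance from addr to the next
// marked address in the previous marking bitmap.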
inline size_t HeapRegion::block_size_using_bitmap(const HeapWord* addr, const G1CMBitMapRO* prev_bitmap) const {
  assert(ClassUnloadingWithConcurrentMark,
         "All blocks should be objects if class unloading isn't used, so this method should not be called. "
         "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
         "addr: " PTR_FORMAT,
         p2i(bottom()), p2i(top()), p2i(end()), p2i(addr));

  // A dead object in an old region may have a dead class, so we cannot
  // read its size from its klass. Instead, find the next live (marked)
  // object and treat everything in between as a single dead block.
  HeapWord* next = prev_bitmap->getNextMarkedWordAddress(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

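// An object is dead iff it was allocated before the previous marking cycle
// started and was not marked then. Objects in open archive regions are
// never considered dead.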
inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMapRO* prev_bitmap) const {
  assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj));
  return !obj_allocated_since_prev_marking(obj) &&
         !prev_bitmap->isMarked((HeapWord*)obj) &&
         !is_open_archive();
}

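// Returns the size of the block starting at addr: the remainder of the
// region for addr == top(), the object size for a live (parseable) object,
// and otherwise the bitmap-based size of the dead block.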
inline size_t HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prevMarkBitMap());
}

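// Young regions are never card-scanned through the BOT, so allocation in
// them can skip BOT updates; the assert enforces the young-only invariant.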
inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
                                                         size_t desired_word_size,
                                                         size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(min_word_size, desired_word_size, actual_word_size);
}