--- old/src/share/vm/gc/g1/heapRegion.cpp
+++ new/src/share/vm/gc/g1/heapRegion.cpp
1 1 /*
2 2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "code/nmethod.hpp"
27 27 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
28 28 #include "gc/g1/g1CollectedHeap.inline.hpp"
29 29 #include "gc/g1/g1OopClosures.inline.hpp"
30 30 #include "gc/g1/heapRegion.inline.hpp"
31 31 #include "gc/g1/heapRegionBounds.inline.hpp"
32 32 #include "gc/g1/heapRegionManager.inline.hpp"
33 33 #include "gc/g1/heapRegionRemSet.hpp"
34 34 #include "gc/shared/genOopClosures.inline.hpp"
35 35 #include "gc/shared/liveRange.hpp"
36 36 #include "gc/shared/space.inline.hpp"
37 37 #include "memory/iterator.hpp"
38 38 #include "oops/oop.inline.hpp"
39 39 #include "runtime/atomic.inline.hpp"
40 40 #include "runtime/orderAccess.inline.hpp"
41 41
42 42 int HeapRegion::LogOfHRGrainBytes = 0;
43 43 int HeapRegion::LogOfHRGrainWords = 0;
44 44 size_t HeapRegion::GrainBytes = 0;
45 45 size_t HeapRegion::GrainWords = 0;
46 46 size_t HeapRegion::CardsPerRegion = 0;
47 47
48 48 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
49 49 HeapRegion* hr,
50 50 G1ParPushHeapRSClosure* cl,
51 51 CardTableModRefBS::PrecisionStyle precision) :
52 52 DirtyCardToOopClosure(hr, cl, precision, NULL),
53 53 _hr(hr), _rs_scan(cl), _g1(g1) { }
54 54
55 55 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
56 56 OopClosure* oc) :
57 57 _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
58 58
59 59 void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
60 60 HeapWord* bottom,
61 61 HeapWord* top) {
62 62 G1CollectedHeap* g1h = _g1;
63 63 size_t oop_size;
64 64 HeapWord* cur = bottom;
65 65
66 66 // Start filtering what we add to the remembered set. If the object is
67 67 // not considered dead, either because it is marked (in the mark bitmap)
68 68 // or it was allocated after marking finished, then we add it. Otherwise
69 69 // we can safely ignore the object.
70 70 if (!g1h->is_obj_dead(oop(cur))) {
71 71 oop_size = oop(cur)->oop_iterate_size(_rs_scan, mr);
72 72 } else {
73 73 oop_size = _hr->block_size(cur);
74 74 }
75 75
76 76 cur += oop_size;
77 77
78 78 if (cur < top) {
79 79 oop cur_oop = oop(cur);
80 80 oop_size = _hr->block_size(cur);
81 81 HeapWord* next_obj = cur + oop_size;
82 82 while (next_obj < top) {
83 83 // Keep filtering the remembered set.
84 84 if (!g1h->is_obj_dead(cur_oop)) {
85 85 // Bottom lies entirely below top, so we can call the
86 86 // non-memRegion version of oop_iterate below.
87 87 cur_oop->oop_iterate(_rs_scan);
88 88 }
89 89 cur = next_obj;
90 90 cur_oop = oop(cur);
91 91 oop_size = _hr->block_size(cur);
92 92 next_obj = cur + oop_size;
93 93 }
94 94
95 95 // Last object. Need to do dead-obj filtering here too.
96 96 if (!g1h->is_obj_dead(oop(cur))) {
97 97 oop(cur)->oop_iterate(_rs_scan, mr);
98 98 }
99 99 }
100 100 }
101 101
102 102 size_t HeapRegion::max_region_size() {
103 103 return HeapRegionBounds::max_size();
104 104 }
105 105
106 106 size_t HeapRegion::min_region_size_in_words() {
107 107 return HeapRegionBounds::min_size() >> LogHeapWordSize;
108 108 }
109 109
110 110 void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
111 111 size_t region_size = G1HeapRegionSize;
112 112 if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
113 113 size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
114 114 region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
115 115 HeapRegionBounds::min_size());
116 116 }
117 117
118 118 int region_size_log = log2_long((jlong) region_size);
119 119 // Recalculate the region size to make sure it's a power of
120 120 // 2. This means that region_size is the largest power of 2 that's
121 121 // <= what we've calculated so far.
122 122 region_size = ((size_t)1 << region_size_log);
123 123
124 124 // Now make sure that we don't go over or under our limits.
125 125 if (region_size < HeapRegionBounds::min_size()) {
126 126 region_size = HeapRegionBounds::min_size();
127 127 } else if (region_size > HeapRegionBounds::max_size()) {
128 128 region_size = HeapRegionBounds::max_size();
129 129 }
130 130
131 131 // And recalculate the log.
132 132 region_size_log = log2_long((jlong) region_size);
133 133
134 134 // Now, set up the globals.
135 135 guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
136 136 LogOfHRGrainBytes = region_size_log;
137 137
138 138 guarantee(LogOfHRGrainWords == 0, "we should only set it once");
139 139 LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;
140 140
141 141 guarantee(GrainBytes == 0, "we should only set it once");
142 142 // The cast to int is safe, given that we've bounded region_size by
143 143 // MIN_REGION_SIZE and MAX_REGION_SIZE.
144 144 GrainBytes = region_size;
145 145
146 146 guarantee(GrainWords == 0, "we should only set it once");
147 147 GrainWords = GrainBytes >> LogHeapWordSize;
148 148 guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");
149 149
150 150 guarantee(CardsPerRegion == 0, "we should only set it once");
151 151 CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
152 152 }
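
A minimal standalone sketch of the sizing policy in setup_heap_region_size() above; the bounds and target count are illustrative stand-ins for the HeapRegionBounds values, and the helper names are hypothetical:

#include <cstddef>
#include <cstdio>

// Largest power of two <= v (v > 0), mirroring the log2_long/shift step above.
static size_t round_down_pow2(size_t v) {
  size_t p = 1;
  while ((p << 1) != 0 && (p << 1) <= v) p <<= 1;
  return p;
}

static size_t choose_region_size(size_t initial_heap, size_t max_heap) {
  const size_t min_region = (size_t)1 << 20;   // stand-in for HeapRegionBounds::min_size()
  const size_t max_region = (size_t)32 << 20;  // stand-in for HeapRegionBounds::max_size()
  const size_t target     = 2048;              // stand-in for HeapRegionBounds::target_number()

  size_t region = ((initial_heap + max_heap) / 2) / target;  // aim for ~target regions
  if (region < min_region) region = min_region;

  region = round_down_pow2(region);             // region size must be a power of two
  if (region < min_region) region = min_region; // then clamp to the allowed bounds
  if (region > max_region) region = max_region;
  return region;
}

int main() {
  // e.g. -Xms2g -Xmx8g: 5g average / 2048 targets ~= 2.5m, rounded down to a 2m region
  printf("%zu\n", choose_region_size((size_t)2 << 30, (size_t)8 << 30));
  return 0;
}

The real code also keeps the log around (LogOfHRGrainBytes), since GrainWords and CardsPerRegion are then derived from GrainBytes by shifting.
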
153 153
154 154 void HeapRegion::reset_after_compaction() {
155 155 G1OffsetTableContigSpace::reset_after_compaction();
156 156 // After a compaction the mark bitmap is invalid, so we must
157 157 // treat all objects as being inside the unmarked area.
158 158 zero_marked_bytes();
159 159 init_top_at_mark_start();
160 160 }
161 161
162 162 void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
163 163 assert(_humongous_start_region == NULL,
164 164 "we should have already filtered out humongous regions");
165 165 assert(!in_collection_set(),
166 166 "Should not clear heap region %u in the collection set", hrm_index());
167 167
168 168 set_allocation_context(AllocationContext::system());
169 169 set_young_index_in_cset(-1);
170 170 uninstall_surv_rate_group();
171 171 set_free();
172 172 reset_pre_dummy_top();
173 173
174 174 if (!par) {
175 175 // If this is parallel, this will be done later.
176 176 HeapRegionRemSet* hrrs = rem_set();
177 177 if (locked) {
178 178 hrrs->clear_locked();
179 179 } else {
180 180 hrrs->clear();
181 181 }
182 182 }
183 183 zero_marked_bytes();
184 184
185 185 _offsets.resize(HeapRegion::GrainWords);
186 186 init_top_at_mark_start();
187 187 if (clear_space) clear(SpaceDecorator::Mangle);
188 188 }
189 189
190 190 void HeapRegion::par_clear() {
191 191 assert(used() == 0, "the region should have been already cleared");
192 192 assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
193 193 HeapRegionRemSet* hrrs = rem_set();
194 194 hrrs->clear();
195 195 CardTableModRefBS* ct_bs =
196 196 barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
197 197 ct_bs->clear(MemRegion(bottom(), end()));
198 198 }
199 199
200 200 void HeapRegion::calc_gc_efficiency() {
201 201 // GC efficiency is the ratio of how much space would be
202 202 // reclaimed over how long we predict it would take to reclaim it.
203 203 G1CollectedHeap* g1h = G1CollectedHeap::heap();
204 204 G1CollectorPolicy* g1p = g1h->g1_policy();
205 205
206 206 // Retrieve a prediction of the elapsed time for this region for
207 207 // a mixed gc because the region will only be evacuated during a
208 208 // mixed gc.
209 209 double region_elapsed_time_ms =
210 210 g1p->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
211 211 _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
212 212 }
213 213
214 214 void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) {
215 215 assert(!is_humongous(), "sanity / pre-condition");
216 216 assert(top() == bottom(), "should be empty");
217 217
218 218 _type.set_starts_humongous();
219 219 _humongous_start_region = this;
220 220
221 221 _offsets.set_for_starts_humongous(obj_top, fill_size);
222 222 }
223 223
224 224 void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
225 225 assert(!is_humongous(), "sanity / pre-condition");
226 226 assert(top() == bottom(), "should be empty");
227 227 assert(first_hr->is_starts_humongous(), "pre-condition");
228 228
229 229 _type.set_continues_humongous();
230 230 _humongous_start_region = first_hr;
231 231 }
232 232
233 233 void HeapRegion::clear_humongous() {
234 234 assert(is_humongous(), "pre-condition");
235 235
236 236 assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
237 237 _humongous_start_region = NULL;
238 238 }
239 239
240 240 HeapRegion::HeapRegion(uint hrm_index,
241 241 G1BlockOffsetSharedArray* sharedOffsetArray,
242 242 MemRegion mr) :
243 243 G1OffsetTableContigSpace(sharedOffsetArray, mr),
244 244 _hrm_index(hrm_index),
245 245 _allocation_context(AllocationContext::system()),
246 246 _humongous_start_region(NULL),
247 247 _next_in_special_set(NULL),
248 248 _evacuation_failed(false),
249 249 _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
250 250 _next_young_region(NULL),
251 251 _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),
252 252 #ifdef ASSERT
253 253 _containing_set(NULL),
254 254 #endif // ASSERT
255 255 _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
256 256 _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
257 257 _predicted_bytes_to_copy(0)
258 258 {
259 259 _rem_set = new HeapRegionRemSet(sharedOffsetArray, this);
260 260 assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
261 261
262 262 initialize(mr);
263 263 }
264 264
265 265 void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
266 266 assert(_rem_set->is_empty(), "Remembered set must be empty");
267 267
268 268 G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
269 269
270 270 hr_clear(false /*par*/, false /*clear_space*/);
271 271 set_top(bottom());
272 272 record_timestamp();
273 273 }
274 274
275 275 CompactibleSpace* HeapRegion::next_compaction_space() const {
276 276 return G1CollectedHeap::heap()->next_compaction_region(this);
277 277 }
278 278
279 279 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
280 280 bool during_conc_mark) {
281 281 // We always recreate the prev marking info and we'll explicitly
282 282 // mark all objects we find to be self-forwarded on the prev
283 283 // bitmap. So all objects need to be below PTAMS.
284 284 _prev_marked_bytes = 0;
285 285
286 286 if (during_initial_mark) {
287 287 // During initial-mark, we'll also explicitly mark all objects
288 288 // we find to be self-forwarded on the next bitmap. So all
289 289 // objects need to be below NTAMS.
290 290 _next_top_at_mark_start = top();
291 291 _next_marked_bytes = 0;
292 292 } else if (during_conc_mark) {
293 293 // During concurrent mark, all objects in the CSet (including
294 294 // the ones we find to be self-forwarded) are implicitly live.
295 295 // So all objects need to be above NTAMS.
296 296 _next_top_at_mark_start = bottom();
297 297 _next_marked_bytes = 0;
298 298 }
299 299 }
300 300
301 301 void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
302 302 bool during_conc_mark,
303 303 size_t marked_bytes) {
304 304 assert(marked_bytes <= used(),
305 305 "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
306 306 _prev_top_at_mark_start = top();
307 307 _prev_marked_bytes = marked_bytes;
308 308 }
309 309
310 310 HeapWord*
311 311 HeapRegion::object_iterate_mem_careful(MemRegion mr,
312 312 ObjectClosure* cl) {
313 313 G1CollectedHeap* g1h = G1CollectedHeap::heap();
314 314 // We used to use "block_start_careful" here. But we're actually happy
315 315 // to update the BOT while we do this...
316 316 HeapWord* cur = block_start(mr.start());
317 317 mr = mr.intersection(used_region());
318 318 if (mr.is_empty()) return NULL;
319 319 // Otherwise, find the obj that extends onto mr.start().
320 320
321 321 assert(cur <= mr.start()
322 322 && (oop(cur)->klass_or_null() == NULL ||
323 323 cur + oop(cur)->size() > mr.start()),
324 324 "postcondition of block_start");
325 325 oop obj;
326 326 while (cur < mr.end()) {
327 327 obj = oop(cur);
328 328 if (obj->klass_or_null() == NULL) {
329 329 // Ran into an unparseable point.
330 330 return cur;
331 331 } else if (!g1h->is_obj_dead(obj)) {
332 332 cl->do_object(obj);
333 333 }
334 334 cur += block_size(cur);
335 335 }
336 336 return NULL;
337 337 }
338 338
339 339 HeapWord*
340 340 HeapRegion::
341 341 oops_on_card_seq_iterate_careful(MemRegion mr,
342 342 FilterOutOfRegionClosure* cl,
343 343 bool filter_young,
344 344 jbyte* card_ptr) {
345 345 // Currently, we should only have to clean the card if filter_young
346 346 // is true and vice versa.
347 347 if (filter_young) {
348 348 assert(card_ptr != NULL, "pre-condition");
349 349 } else {
350 350 assert(card_ptr == NULL, "pre-condition");
351 351 }
352 352 G1CollectedHeap* g1h = G1CollectedHeap::heap();
353 353
354 354 // If we're within a stop-world GC, then we might look at a card in a
355 355 // GC alloc region that extends onto a GC LAB, which may not be
356 356 // parseable. Stop such at the "scan_top" of the region.
357 357 if (g1h->is_gc_active()) {
358 358 mr = mr.intersection(MemRegion(bottom(), scan_top()));
359 359 } else {
360 360 mr = mr.intersection(used_region());
361 361 }
362 362 if (mr.is_empty()) return NULL;
363 363 // Otherwise, find the obj that extends onto mr.start().
364 364
365 365 // The intersection of the incoming mr (for the card) and the
366 366 // allocated part of the region is non-empty. This implies that
367 367 // we have actually allocated into this region. The code in
368 368 // G1CollectedHeap.cpp that allocates a new region sets the
369 369 // is_young tag on the region before allocating. Thus we
370 370 // safely know if this region is young.
371 371 if (is_young() && filter_young) {
372 372 return NULL;
373 373 }
374 374
375 375 assert(!is_young(), "check value of filter_young");
376 376
377 377 // We can only clean the card here, after we make the decision that
378 378 // the card is not young. And we only clean the card if we have been
379 379 // asked to (i.e., card_ptr != NULL).
380 380 if (card_ptr != NULL) {
381 381 *card_ptr = CardTableModRefBS::clean_card_val();
382 382 // We must complete this write before we do any of the reads below.
383 383 OrderAccess::storeload();
384 384 }
385 385
386 386 // Cache the boundaries of the memory region in some const locals
387 387 HeapWord* const start = mr.start();
388 388 HeapWord* const end = mr.end();
389 389
390 390 // We used to use "block_start_careful" here. But we're actually happy
391 391 // to update the BOT while we do this...
392 392 HeapWord* cur = block_start(start);
393 393 assert(cur <= start, "Postcondition");
394 394
395 395 oop obj;
396 396
397 397 HeapWord* next = cur;
398 398 do {
399 399 cur = next;
400 400 obj = oop(cur);
401 401 if (obj->klass_or_null() == NULL) {
402 402 // Ran into an unparseable point.
403 403 return cur;
404 404 }
405 405 // Otherwise...
406 406 next = cur + block_size(cur);
407 407 } while (next <= start);
408 408
 409  409   // If we finish the above loop, we have a parseable object that
410 410 // begins on or before the start of the memory region, and ends
411 411 // inside or spans the entire region.
412 412 assert(cur <= start, "Loop postcondition");
413 413 assert(obj->klass_or_null() != NULL, "Loop postcondition");
414 414
415 415 do {
416 416 obj = oop(cur);
417 417 assert((cur + block_size(cur)) > (HeapWord*)obj, "Loop invariant");
418 418 if (obj->klass_or_null() == NULL) {
419 419 // Ran into an unparseable point.
420 420 return cur;
421 421 }
422 422
423 423 // Advance the current pointer. "obj" still points to the object to iterate.
424 424 cur = cur + block_size(cur);
425 425
426 426 if (!g1h->is_obj_dead(obj)) {
427 427 // Non-objArrays are sometimes marked imprecise at the object start. We
428 428 // always need to iterate over them in full.
429 429 // We only iterate over object arrays in full if they are completely contained
430 430 // in the memory region.
431 431 if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
432 432 obj->oop_iterate(cl);
433 433 } else {
434 434 obj->oop_iterate(cl, mr);
435 435 }
436 436 }
437 437 } while (cur < end);
438 438
439 439 return NULL;
440 440 }
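
A minimal sketch of the two-phase walk in oops_on_card_seq_iterate_careful() above, with plain arrays standing in for the region's block and liveness queries; the types and names here are hypothetical illustration, not the real HeapRegion API:

#include <cstddef>

struct Block { size_t size; bool dead; };

// Visit every live block that intersects [card_start, card_end), given blocks laid out
// back to back starting at address 0. Mirrors the structure above: first back up to the
// block that extends onto card_start, then walk forward until we step past card_end.
template <typename Visit>
void walk_card(const Block* blocks, size_t nblocks,
               size_t card_start, size_t card_end, Visit visit) {
  size_t addr = 0, i = 0;
  // Phase 1: find the block whose extent covers card_start.
  while (i < nblocks && addr + blocks[i].size <= card_start) {
    addr += blocks[i].size;
    ++i;
  }
  // Phase 2: visit every live block until the cursor passes card_end.
  while (i < nblocks && addr < card_end) {
    if (!blocks[i].dead) {
      visit(addr, blocks[i].size);
    }
    addr += blocks[i].size;
    ++i;
  }
}

int main() {
  Block heap[] = { {16, false}, {8, true}, {32, false} };  // hypothetical layout
  walk_card(heap, 3, /*card_start*/ 20, /*card_end*/ 40,
            [](size_t addr, size_t size) { /* e.g. scan oops in [addr, addr + size) */ });
  return 0;
}

The real method additionally bounds the scan by scan_top() during a GC pause, bails out at unparseable points, and uses the MemRegion-bounded iteration for obj arrays that extend past the card.
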
441 441
442 442 // Code roots support
443 443
444 444 void HeapRegion::add_strong_code_root(nmethod* nm) {
445 445 HeapRegionRemSet* hrrs = rem_set();
446 446 hrrs->add_strong_code_root(nm);
447 447 }
448 448
449 449 void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
450 450 assert_locked_or_safepoint(CodeCache_lock);
451 451 HeapRegionRemSet* hrrs = rem_set();
452 452 hrrs->add_strong_code_root_locked(nm);
453 453 }
454 454
455 455 void HeapRegion::remove_strong_code_root(nmethod* nm) {
456 456 HeapRegionRemSet* hrrs = rem_set();
457 457 hrrs->remove_strong_code_root(nm);
458 458 }
459 459
460 460 void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
461 461 HeapRegionRemSet* hrrs = rem_set();
462 462 hrrs->strong_code_roots_do(blk);
463 463 }
464 464
465 465 class VerifyStrongCodeRootOopClosure: public OopClosure {
466 466 const HeapRegion* _hr;
467 467 nmethod* _nm;
468 468 bool _failures;
469 469 bool _has_oops_in_region;
470 470
471 471 template <class T> void do_oop_work(T* p) {
472 472 T heap_oop = oopDesc::load_heap_oop(p);
473 473 if (!oopDesc::is_null(heap_oop)) {
474 474 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
475 475
476 476 // Note: not all the oops embedded in the nmethod are in the
477 477 // current region. We only look at those which are.
478 478 if (_hr->is_in(obj)) {
 479  479         // Object is in the region. Check that it's less than top
480 480 if (_hr->top() <= (HeapWord*)obj) {
481 481 // Object is above top
482 482 gclog_or_tty->print_cr("Object " PTR_FORMAT " in region "
483 483 "[" PTR_FORMAT ", " PTR_FORMAT ") is above "
484 484 "top " PTR_FORMAT,
485 485 p2i(obj), p2i(_hr->bottom()), p2i(_hr->end()), p2i(_hr->top()));
486 486 _failures = true;
487 487 return;
488 488 }
489 489 // Nmethod has at least one oop in the current region
490 490 _has_oops_in_region = true;
491 491 }
492 492 }
493 493 }
494 494
495 495 public:
496 496 VerifyStrongCodeRootOopClosure(const HeapRegion* hr, nmethod* nm):
 497  497     _hr(hr), _nm(nm), _failures(false), _has_oops_in_region(false) {}
498 498
499 499 void do_oop(narrowOop* p) { do_oop_work(p); }
500 500 void do_oop(oop* p) { do_oop_work(p); }
501 501
502 502 bool failures() { return _failures; }
503 503 bool has_oops_in_region() { return _has_oops_in_region; }
504 504 };
505 505
506 506 class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
507 507 const HeapRegion* _hr;
508 508 bool _failures;
509 509 public:
510 510 VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
511 511 _hr(hr), _failures(false) {}
512 512
513 513 void do_code_blob(CodeBlob* cb) {
514 514 nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
515 515 if (nm != NULL) {
 516  516       // Verify that the nmethod is live
517 517 if (!nm->is_alive()) {
518 518 gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod "
519 519 PTR_FORMAT " in its strong code roots",
520 520 p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
521 521 _failures = true;
522 522 } else {
523 523 VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
524 524 nm->oops_do(&oop_cl);
525 525 if (!oop_cl.has_oops_in_region()) {
526 526 gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod "
527 527 PTR_FORMAT " in its strong code roots "
528 528 "with no pointers into region",
529 529 p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
530 530 _failures = true;
531 531 } else if (oop_cl.failures()) {
532 532 gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] has other "
533 533 "failures for nmethod " PTR_FORMAT,
534 534 p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
535 535 _failures = true;
536 536 }
537 537 }
538 538 }
539 539 }
540 540
541 541 bool failures() { return _failures; }
542 542 };
543 543
544 544 void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
545 545 if (!G1VerifyHeapRegionCodeRoots) {
546 546 // We're not verifying code roots.
547 547 return;
548 548 }
549 549 if (vo == VerifyOption_G1UseMarkWord) {
550 550 // Marking verification during a full GC is performed after class
551 551 // unloading, code cache unloading, etc so the strong code roots
552 552 // attached to each heap region are in an inconsistent state. They won't
553 553 // be consistent until the strong code roots are rebuilt after the
 554  554     // actual GC. Skip verifying the strong code roots at this particular
555 555 // time.
556 556 assert(VerifyDuringGC, "only way to get here");
557 557 return;
558 558 }
559 559
560 560 HeapRegionRemSet* hrrs = rem_set();
561 561 size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();
562 562
563 563 // if this region is empty then there should be no entries
564 564 // on its strong code root list
565 565 if (is_empty()) {
566 566 if (strong_code_roots_length > 0) {
567 567 gclog_or_tty->print_cr("region [" PTR_FORMAT "," PTR_FORMAT "] is empty "
568 568 "but has " SIZE_FORMAT " code root entries",
569 569 p2i(bottom()), p2i(end()), strong_code_roots_length);
570 570 *failures = true;
571 571 }
572 572 return;
573 573 }
574 574
575 575 if (is_continues_humongous()) {
576 576 if (strong_code_roots_length > 0) {
577 577 gclog_or_tty->print_cr("region " HR_FORMAT " is a continuation of a humongous "
578 578 "region but has " SIZE_FORMAT " code root entries",
579 579 HR_FORMAT_PARAMS(this), strong_code_roots_length);
580 580 *failures = true;
581 581 }
582 582 return;
583 583 }
584 584
585 585 VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
586 586 strong_code_roots_do(&cb_cl);
587 587
588 588 if (cb_cl.failures()) {
589 589 *failures = true;
590 590 }
591 591 }
592 592
593 593 void HeapRegion::print() const { print_on(gclog_or_tty); }
594 594 void HeapRegion::print_on(outputStream* st) const {
595 595 st->print("AC%4u", allocation_context());
596 596
597 597 st->print(" %2s", get_short_type_str());
598 598 if (in_collection_set())
599 599 st->print(" CS");
600 600 else
601 601 st->print(" ");
602 602 st->print(" TS %5d", _gc_time_stamp);
603 603 st->print(" PTAMS " PTR_FORMAT " NTAMS " PTR_FORMAT,
604 604 p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()));
605 605 G1OffsetTableContigSpace::print_on(st);
606 606 }
607 607
608 -class VerifyLiveClosure: public OopClosure {
609 -private:
608 +class HeapRegionVerifyClosure : public OopClosure {
609 +protected:
610 610 G1CollectedHeap* _g1h;
611 611 CardTableModRefBS* _bs;
612 612 oop _containing_obj;
613 613 bool _failures;
614 614 int _n_failures;
615 615 VerifyOption _vo;
616 616 public:
617 617 // _vo == UsePrevMarking -> use "prev" marking information,
618 618 // _vo == UseNextMarking -> use "next" marking information,
619 619 // _vo == UseMarkWord -> use mark word from object header.
620 - VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) :
620 + HeapRegionVerifyClosure(G1CollectedHeap* g1h, VerifyOption vo) :
621 621 _g1h(g1h), _bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
622 622 _containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo)
623 623 { }
624 624
625 625 void set_containing_obj(oop obj) {
626 626 _containing_obj = obj;
627 627 }
628 628
629 629 bool failures() { return _failures; }
630 630 int n_failures() { return _n_failures; }
631 631
632 - virtual void do_oop(narrowOop* p) { do_oop_work(p); }
633 - virtual void do_oop( oop* p) { do_oop_work(p); }
634 -
635 632 void print_object(outputStream* out, oop obj) {
636 633 #ifdef PRODUCT
637 634 Klass* k = obj->klass();
638 635 const char* class_name = k->external_name();
639 636 out->print_cr("class name %s", class_name);
640 637 #else // PRODUCT
641 638 obj->print_on(out);
642 639 #endif // PRODUCT
643 640 }
644 641
645 642 template <class T>
643 + void verifyRemSets(T* p) {
644 + T heap_oop = oopDesc::load_heap_oop(p);
645 + if (!oopDesc::is_null(heap_oop)) {
646 + oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
647 + bool failed = false;
648 +
649 + HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
650 + HeapRegion* to = _g1h->heap_region_containing(obj);
651 + if (from != NULL && to != NULL &&
652 + from != to &&
653 + !to->is_pinned()) {
654 + jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
655 + jbyte cv_field = *_bs->byte_for_const(p);
656 + const jbyte dirty = CardTableModRefBS::dirty_card_val();
657 +
658 + bool is_bad = !(from->is_young()
659 + || to->rem_set()->contains_reference(p)
660 + || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
661 + (_containing_obj->is_objArray() ?
662 + cv_field == dirty
663 + : cv_obj == dirty || cv_field == dirty));
664 + if (is_bad) {
665 + MutexLockerEx x(ParGCRareEvent_lock,
666 + Mutex::_no_safepoint_check_flag);
667 +
668 + if (!_failures) {
669 + gclog_or_tty->cr();
670 + gclog_or_tty->print_cr("----------");
671 + }
672 + gclog_or_tty->print_cr("Missing rem set entry:");
673 + gclog_or_tty->print_cr("Field " PTR_FORMAT " "
674 + "of obj " PTR_FORMAT ", "
675 + "in region " HR_FORMAT,
676 + p2i(p), p2i(_containing_obj),
677 + HR_FORMAT_PARAMS(from));
678 + _containing_obj->print_on(gclog_or_tty);
679 + gclog_or_tty->print_cr("points to obj " PTR_FORMAT " "
680 + "in region " HR_FORMAT,
681 + p2i(obj),
682 + HR_FORMAT_PARAMS(to));
683 + obj->print_on(gclog_or_tty);
684 + gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
685 + cv_obj, cv_field);
686 + gclog_or_tty->print_cr("----------");
687 + gclog_or_tty->flush();
688 + _failures = true;
689 + if (!failed) _n_failures++;
690 + }
691 + }
692 + }
693 + }
694 +};
695 +
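
A minimal sketch of the acceptance test that verifyRemSets() encodes above, pulled out as a pure predicate; the boolean parameters are illustrative stand-ins for the region, remembered-set and card-table queries:

// A cross-region pointer is acceptable if the source region is young (young regions are
// always scanned in full), or the destination region's remembered set already records the
// referencing card, or the log buffers were not flushed before verification and the
// relevant card is still dirty (so the entry would be added once the buffers are
// processed). For obj-array containers only the field's card counts, because obj arrays
// are dirtied per element rather than at the object header.
static bool remset_entry_missing(bool from_is_young,
                                 bool remset_contains_card,
                                 bool buffers_flushed,
                                 bool containing_is_obj_array,
                                 bool obj_card_dirty,
                                 bool field_card_dirty) {
  bool covered_by_dirty_card = containing_is_obj_array
                               ? field_card_dirty
                               : (obj_card_dirty || field_card_dirty);
  return !(from_is_young
           || remset_contains_card
           || (!buffers_flushed && covered_by_dirty_card));
}

The predicate is only evaluated after the guards above (both regions exist, they differ, and the destination is not pinned), which is why those conditions do not appear here.
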
696 +class VerifyRSetClosure : public HeapRegionVerifyClosure {
697 +
698 +public:
699 + VerifyRSetClosure(G1CollectedHeap* g1h, VerifyOption vo) : HeapRegionVerifyClosure(g1h, vo) { }
700 +
701 + virtual void do_oop(narrowOop* p) { do_oop_work(p); }
702 + virtual void do_oop(oop* p) { do_oop_work(p); }
703 +
704 + template <class T>
646 705 void do_oop_work(T* p) {
647 706 assert(_containing_obj != NULL, "Precondition");
648 707 assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
708 + "Precondition");
709 +
710 + verifyRemSets(p);
711 + }
712 +};
713 +
714 +class VerifyLiveClosure : public HeapRegionVerifyClosure {
715 +public:
716 + VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) : HeapRegionVerifyClosure(g1h, vo) { }
717 +
718 + virtual void do_oop(narrowOop* p) { do_oop_work(p); }
719 + virtual void do_oop( oop* p) { do_oop_work(p); }
720 +
721 + template <class T>
722 + void do_oop_work(T* p) {
723 + assert(_containing_obj != NULL, "Precondition");
724 + assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
649 725 "Precondition");
650 726 T heap_oop = oopDesc::load_heap_oop(p);
651 727 if (!oopDesc::is_null(heap_oop)) {
652 728 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
653 729 bool failed = false;
654 730 if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
655 731 MutexLockerEx x(ParGCRareEvent_lock,
656 732 Mutex::_no_safepoint_check_flag);
657 733
658 734 if (!_failures) {
659 735 gclog_or_tty->cr();
660 736 gclog_or_tty->print_cr("----------");
661 737 }
662 738 if (!_g1h->is_in_closed_subset(obj)) {
663 739 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
664 740 gclog_or_tty->print_cr("Field " PTR_FORMAT
665 741 " of live obj " PTR_FORMAT " in region "
666 742 "[" PTR_FORMAT ", " PTR_FORMAT ")",
667 743 p2i(p), p2i(_containing_obj),
668 744 p2i(from->bottom()), p2i(from->end()));
669 745 print_object(gclog_or_tty, _containing_obj);
670 746 gclog_or_tty->print_cr("points to obj " PTR_FORMAT " not in the heap",
671 747 p2i(obj));
672 748 } else {
673 749 HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
674 750 HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
675 751 gclog_or_tty->print_cr("Field " PTR_FORMAT
676 752 " of live obj " PTR_FORMAT " in region "
677 753 "[" PTR_FORMAT ", " PTR_FORMAT ")",
678 754 p2i(p), p2i(_containing_obj),
679 755 p2i(from->bottom()), p2i(from->end()));
680 756 print_object(gclog_or_tty, _containing_obj);
681 757 gclog_or_tty->print_cr("points to dead obj " PTR_FORMAT " in region "
682 758 "[" PTR_FORMAT ", " PTR_FORMAT ")",
683 759 p2i(obj), p2i(to->bottom()), p2i(to->end()));
684 760 print_object(gclog_or_tty, obj);
685 761 }
686 762 gclog_or_tty->print_cr("----------");
687 763 gclog_or_tty->flush();
688 764 _failures = true;
689 765 failed = true;
690 766 _n_failures++;
691 767 }
692 768
693 769 if (!_g1h->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC) {
694 - HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
695 - HeapRegion* to = _g1h->heap_region_containing(obj);
696 - if (from != NULL && to != NULL &&
697 - from != to &&
698 - !to->is_pinned()) {
699 - jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
700 - jbyte cv_field = *_bs->byte_for_const(p);
701 - const jbyte dirty = CardTableModRefBS::dirty_card_val();
702 -
703 - bool is_bad = !(from->is_young()
704 - || to->rem_set()->contains_reference(p)
705 - || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
706 - (_containing_obj->is_objArray() ?
707 - cv_field == dirty
708 - : cv_obj == dirty || cv_field == dirty));
709 - if (is_bad) {
710 - MutexLockerEx x(ParGCRareEvent_lock,
711 - Mutex::_no_safepoint_check_flag);
712 -
713 - if (!_failures) {
714 - gclog_or_tty->cr();
715 - gclog_or_tty->print_cr("----------");
716 - }
717 - gclog_or_tty->print_cr("Missing rem set entry:");
718 - gclog_or_tty->print_cr("Field " PTR_FORMAT " "
719 - "of obj " PTR_FORMAT ", "
720 - "in region " HR_FORMAT,
721 - p2i(p), p2i(_containing_obj),
722 - HR_FORMAT_PARAMS(from));
723 - _containing_obj->print_on(gclog_or_tty);
724 - gclog_or_tty->print_cr("points to obj " PTR_FORMAT " "
725 - "in region " HR_FORMAT,
726 - p2i(obj),
727 - HR_FORMAT_PARAMS(to));
728 - obj->print_on(gclog_or_tty);
729 - gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
730 - cv_obj, cv_field);
731 - gclog_or_tty->print_cr("----------");
732 - gclog_or_tty->flush();
733 - _failures = true;
734 - if (!failed) _n_failures++;
735 - }
736 - }
770 + verifyRemSets(p);
737 771 }
738 772 }
739 773 }
740 774 };
741 775
742 776 // This really ought to be commoned up into OffsetTableContigSpace somehow.
743 777 // We would need a mechanism to make that code skip dead objects.
744 778
745 779 void HeapRegion::verify(VerifyOption vo,
746 780 bool* failures) const {
747 781 G1CollectedHeap* g1 = G1CollectedHeap::heap();
748 782 *failures = false;
749 783 HeapWord* p = bottom();
750 784 HeapWord* prev_p = NULL;
751 785 VerifyLiveClosure vl_cl(g1, vo);
752 786 bool is_region_humongous = is_humongous();
753 787 size_t object_num = 0;
754 788 while (p < top()) {
755 789 oop obj = oop(p);
756 790 size_t obj_size = block_size(p);
757 791 object_num += 1;
758 792
759 793 if (!g1->is_obj_dead_cond(obj, this, vo)) {
760 794 if (obj->is_oop()) {
761 795 Klass* klass = obj->klass();
762 796 bool is_metaspace_object = Metaspace::contains(klass) ||
763 797 (vo == VerifyOption_G1UsePrevMarking &&
764 798 ClassLoaderDataGraph::unload_list_contains(klass));
765 799 if (!is_metaspace_object) {
766 800 gclog_or_tty->print_cr("klass " PTR_FORMAT " of object " PTR_FORMAT " "
767 801 "not metadata", p2i(klass), p2i(obj));
768 802 *failures = true;
769 803 return;
770 804 } else if (!klass->is_klass()) {
771 805 gclog_or_tty->print_cr("klass " PTR_FORMAT " of object " PTR_FORMAT " "
772 806 "not a klass", p2i(klass), p2i(obj));
773 807 *failures = true;
774 808 return;
775 809 } else {
776 810 vl_cl.set_containing_obj(obj);
777 811 obj->oop_iterate_no_header(&vl_cl);
778 812 if (vl_cl.failures()) {
779 813 *failures = true;
780 814 }
781 815 if (G1MaxVerifyFailures >= 0 &&
782 816 vl_cl.n_failures() >= G1MaxVerifyFailures) {
783 817 return;
784 818 }
785 819 }
786 820 } else {
 787  821         gclog_or_tty->print_cr(PTR_FORMAT " not an oop", p2i(obj));
788 822 *failures = true;
789 823 return;
790 824 }
791 825 }
792 826 prev_p = p;
793 827 p += obj_size;
794 828 }
795 829
796 830 if (!is_young() && !is_empty()) {
797 831 _offsets.verify();
798 832 }
799 833
800 834 if (is_region_humongous) {
801 835 oop obj = oop(this->humongous_start_region()->bottom());
802 836 if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) {
 803  837       gclog_or_tty->print_cr("this humongous region is not part of its humongous object " PTR_FORMAT, p2i(obj));
804 838 }
805 839 }
806 840
807 841 if (!is_region_humongous && p != top()) {
808 842 gclog_or_tty->print_cr("end of last object " PTR_FORMAT " "
809 843 "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
810 844 *failures = true;
811 845 return;
812 846 }
813 847
814 848 HeapWord* the_end = end();
815 849 // Do some extra BOT consistency checking for addresses in the
816 850 // range [top, end). BOT look-ups in this range should yield
817 851 // top. No point in doing that if top == end (there's nothing there).
818 852 if (p < the_end) {
819 853 // Look up top
820 854 HeapWord* addr_1 = p;
821 855 HeapWord* b_start_1 = _offsets.block_start_const(addr_1);
822 856 if (b_start_1 != p) {
823 857 gclog_or_tty->print_cr("BOT look up for top: " PTR_FORMAT " "
824 858 " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
825 859 p2i(addr_1), p2i(b_start_1), p2i(p));
826 860 *failures = true;
827 861 return;
828 862 }
829 863
830 864 // Look up top + 1
831 865 HeapWord* addr_2 = p + 1;
832 866 if (addr_2 < the_end) {
833 867 HeapWord* b_start_2 = _offsets.block_start_const(addr_2);
834 868 if (b_start_2 != p) {
835 869 gclog_or_tty->print_cr("BOT look up for top + 1: " PTR_FORMAT " "
836 870 " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
837 871 p2i(addr_2), p2i(b_start_2), p2i(p));
838 872 *failures = true;
839 873 return;
840 874 }
841 875 }
842 876
843 877 // Look up an address between top and end
844 878 size_t diff = pointer_delta(the_end, p) / 2;
845 879 HeapWord* addr_3 = p + diff;
846 880 if (addr_3 < the_end) {
847 881 HeapWord* b_start_3 = _offsets.block_start_const(addr_3);
848 882 if (b_start_3 != p) {
849 883 gclog_or_tty->print_cr("BOT look up for top + diff: " PTR_FORMAT " "
850 884 " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
851 885 p2i(addr_3), p2i(b_start_3), p2i(p));
852 886 *failures = true;
853 887 return;
854 888 }
855 889 }
856 890
857 891 // Look up end - 1
858 892 HeapWord* addr_4 = the_end - 1;
859 893 HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
860 894 if (b_start_4 != p) {
861 895 gclog_or_tty->print_cr("BOT look up for end - 1: " PTR_FORMAT " "
862 896 " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
863 897 p2i(addr_4), p2i(b_start_4), p2i(p));
864 898 *failures = true;
865 899 return;
866 900 }
867 901 }
868 902
869 903 verify_strong_code_roots(vo, failures);
870 904 }
871 905
872 906 void HeapRegion::verify() const {
873 907 bool dummy = false;
874 908 verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
875 909 }
876 910
911 +void HeapRegion::verifyRSet(VerifyOption vo,
912 + bool* failures) const {
913 + G1CollectedHeap* g1 = G1CollectedHeap::heap();
914 + *failures = false;
915 + HeapWord* p = bottom();
916 + HeapWord* prev_p = NULL;
917 + VerifyRSetClosure v_rset_cl(g1, vo);
918 + bool is_region_humongous = is_humongous();
919 + while (p < top()) {
920 + oop obj = oop(p);
921 + size_t obj_size = block_size(p);
922 +
923 + if (!g1->is_obj_dead_cond(obj, this, vo)) {
924 + if (obj->is_oop()) {
925 + v_rset_cl.set_containing_obj(obj);
926 + obj->oop_iterate_no_header(&v_rset_cl);
927 + if (v_rset_cl.failures()) {
928 + *failures = true;
929 + }
930 + if (G1MaxVerifyFailures >= 0 &&
931 + v_rset_cl.n_failures() >= G1MaxVerifyFailures) {
932 + return;
933 + }
934 + }
935 + else {
936 + gclog_or_tty->print_cr(PTR_FORMAT " not an oop", p2i(obj));
937 + *failures = true;
938 + return;
939 + }
940 + }
941 + prev_p = p;
942 + p += obj_size;
943 + }
944 +}
945 +
877 946 void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
878 947 scan_and_forward(this, cp);
879 948 }
880 949
881 950 // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
882 951 // away eventually.
883 952
884 953 void G1OffsetTableContigSpace::clear(bool mangle_space) {
885 954 set_top(bottom());
886 955 _scan_top = bottom();
887 956 CompactibleSpace::clear(mangle_space);
888 957 reset_bot();
889 958 }
890 959
891 960 void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
892 961 Space::set_bottom(new_bottom);
893 962 _offsets.set_bottom(new_bottom);
894 963 }
895 964
896 965 void G1OffsetTableContigSpace::set_end(HeapWord* new_end) {
897 966 assert(new_end == _bottom + HeapRegion::GrainWords, "set_end should only ever be set to _bottom + HeapRegion::GrainWords");
898 967 Space::set_end(new_end);
899 968 _offsets.resize(new_end - bottom());
900 969 }
901 970
902 971 #ifndef PRODUCT
903 972 void G1OffsetTableContigSpace::mangle_unused_area() {
904 973 mangle_unused_area_complete();
905 974 }
906 975
907 976 void G1OffsetTableContigSpace::mangle_unused_area_complete() {
908 977 SpaceMangler::mangle_region(MemRegion(top(), end()));
909 978 }
910 979 #endif
911 980
912 981 void G1OffsetTableContigSpace::print() const {
913 982 print_short();
914 983 gclog_or_tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
915 984 INTPTR_FORMAT ", " INTPTR_FORMAT ")",
916 985 p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
917 986 }
918 987
919 988 HeapWord* G1OffsetTableContigSpace::initialize_threshold() {
920 989 return _offsets.initialize_threshold();
921 990 }
922 991
923 992 HeapWord* G1OffsetTableContigSpace::cross_threshold(HeapWord* start,
924 993 HeapWord* end) {
925 994 _offsets.alloc_block(start, end);
926 995 return _offsets.threshold();
927 996 }
928 997
929 998 HeapWord* G1OffsetTableContigSpace::scan_top() const {
930 999 G1CollectedHeap* g1h = G1CollectedHeap::heap();
931 1000 HeapWord* local_top = top();
932 1001 OrderAccess::loadload();
933 1002 const unsigned local_time_stamp = _gc_time_stamp;
934 1003 assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
935 1004 if (local_time_stamp < g1h->get_gc_time_stamp()) {
936 1005 return local_top;
937 1006 } else {
938 1007 return _scan_top;
939 1008 }
940 1009 }
941 1010
942 1011 void G1OffsetTableContigSpace::record_timestamp() {
943 1012 G1CollectedHeap* g1h = G1CollectedHeap::heap();
944 1013 unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
945 1014
946 1015 if (_gc_time_stamp < curr_gc_time_stamp) {
947 1016 // Setting the time stamp here tells concurrent readers to look at
948 1017 // scan_top to know the maximum allowed address to look at.
949 1018
950 1019 // scan_top should be bottom for all regions except for the
951 1020 // retained old alloc region which should have scan_top == top
952 1021 HeapWord* st = _scan_top;
953 1022 guarantee(st == _bottom || st == _top, "invariant");
954 1023
955 1024 _gc_time_stamp = curr_gc_time_stamp;
956 1025 }
957 1026 }
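
A minimal sketch of the timestamp protocol used by scan_top() and record_timestamp() above, leaving out the loadload ordering; the type and field names are illustrative, not the real G1 API:

// A region stamped in an older GC than the current one has not been retained as an
// allocation region, so concurrent refinement may scan up to its current top; once the
// region is stamped for the current GC, readers must stop at the recorded scan_top.
struct RegionView {
  unsigned gc_time_stamp;
  char*    top;
  char*    scan_top;

  char* safe_scan_limit(unsigned current_gc_time_stamp) const {
    return (gc_time_stamp < current_gc_time_stamp) ? top : scan_top;
  }
};
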
958 1027
959 1028 void G1OffsetTableContigSpace::record_retained_region() {
960 1029 // scan_top is the maximum address where it's safe for the next gc to
961 1030 // scan this region.
962 1031 _scan_top = top();
963 1032 }
964 1033
965 1034 void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
966 1035 object_iterate(blk);
967 1036 }
968 1037
969 1038 void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
970 1039 HeapWord* p = bottom();
971 1040 while (p < top()) {
972 1041 if (block_is_obj(p)) {
973 1042 blk->do_object(oop(p));
974 1043 }
975 1044 p += block_size(p);
976 1045 }
977 1046 }
978 1047
979 1048 G1OffsetTableContigSpace::
980 1049 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
981 1050 MemRegion mr) :
982 1051 _offsets(sharedOffsetArray, mr),
983 1052 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
984 1053 _gc_time_stamp(0)
985 1054 {
986 1055 _offsets.set_space(this);
987 1056 }
988 1057
989 1058 void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
990 1059 CompactibleSpace::initialize(mr, clear_space, mangle_space);
991 1060 _top = bottom();
992 1061 _scan_top = bottom();
993 1062 set_saved_mark_word(NULL);
994 1063 reset_bot();
995 1064 }
996 1065