86 return 0;
87 }
88
89 void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
90 // Parallel or sequential, we must always set the prev to equal the
91 // last one written.
92 if (parallel) {
93 // Find a parallel value to be used next.
94 jbyte next_val = find_unused_youngergenP_card_value();
95 set_cur_youngergen_card_val(next_val);
96
97 } else {
98 // In an sequential traversal we will always write youngergen, so that
99 // the inline barrier is correct.
100 set_cur_youngergen_card_val(youngergen_card);
101 }
102 }
103
104 void CardTableRS::younger_refs_iterate(Generation* g,
105 OopsInGenClosure* blk) {
106 _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val();
107 g->younger_refs_iterate(blk);
108 }
109
110 inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
111 if (_is_par) {
112 return clear_card_parallel(entry);
113 } else {
114 return clear_card_serial(entry);
115 }
116 }
117
118 inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
119 while (true) {
120 // In the parallel case, we may have to do this several times.
121 jbyte entry_val = *entry;
122 assert(entry_val != CardTableRS::clean_card_val(),
123 "We shouldn't be looking at clean cards, and this should "
124 "be the only place they get cleaned.");
125 if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
126 || _ct->is_prev_youngergen_card_val(entry_val)) {
288 assert(UseConcMarkSweepGC, "Tautology: see assert above");
289 warning("CMS+ParNew: Did you forget to call save_marks()? "
290 "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
291 "[" PTR_FORMAT ", " PTR_FORMAT ")",
292 p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
293 MemRegion ur2 = sp->used_region();
294 MemRegion urasm2 = sp->used_region_at_save_marks();
295 if (!ur.equals(ur2)) {
296 warning("CMS+ParNew: Flickering used_region()!!");
297 }
298 if (!urasm.equals(urasm2)) {
299 warning("CMS+ParNew: Flickering used_region_at_save_marks()!!");
300 }
301 ShouldNotReachHere();
302 }
303 #endif
304 _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
305 }
306
// Reset (to clean) the card table entries covering old_gen's
// previously used region.
void CardTableRS::clear_into_younger(Generation* old_gen) {
  assert(old_gen->level() == 1, "Should only be called for the old generation");
  // The card tables for the youngest gen need never be cleared.
  // There's a bit of subtlety in the clear() and invalidate()
  // methods that we exploit here and in invalidate_or_clear()
  // below to avoid missing cards at the fringes. If clear() or
  // invalidate() are changed in the future, this code should
  // be revisited. 20040107.ysr
  // NOTE(review): uses prev_used_region() rather than used_region() —
  // presumably so cards for space vacated by the last collection are
  // covered as well; confirm against callers.
  clear(old_gen->prev_used_region());
}
317
// Dirty (invalidate) cards for the occupied part of old_gen and clean
// (clear) cards for the part that was used before but is unoccupied now.
void CardTableRS::invalidate_or_clear(Generation* old_gen) {
  assert(old_gen->level() == 1, "Should only be called for the old generation");
  // Invalidate the cards for the currently occupied part of
  // the old generation and clear the cards for the
  // unoccupied part of the generation (if any, making use
  // of that generation's prev_used_region to determine that
  // region). No need to do anything for the youngest
  // generation. Also see note#20040107.ysr above.
  MemRegion used_mr = old_gen->used_region();
  // The region that was in use before but no longer is: previously
  // used minus currently used.
  MemRegion to_be_cleared_mr = old_gen->prev_used_region().minus(used_mr);
  if (!to_be_cleared_mr.is_empty()) {
    clear(to_be_cleared_mr);
  }
  invalidate(used_mr);
}
333
334
335 class VerifyCleanCardClosure: public OopClosure {
336 private:
337 HeapWord* _boundary;
338 HeapWord* _begin;
339 HeapWord* _end;
365 virtual void do_oop(oop* p) { VerifyCleanCardClosure::do_oop_work(p); }
366 virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); }
367 };
368
369 class VerifyCTSpaceClosure: public SpaceClosure {
370 private:
371 CardTableRS* _ct;
372 HeapWord* _boundary;
373 public:
374 VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) :
375 _ct(ct), _boundary(boundary) {}
376 virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); }
377 };
378
379 class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
380 CardTableRS* _ct;
381 public:
382 VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
383 void do_generation(Generation* gen) {
384 // Skip the youngest generation.
385 if (gen->level() == 0) return;
386 // Normally, we're interested in pointers to younger generations.
387 VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
388 gen->space_iterate(&blk, true);
389 }
390 };
391
392 void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
393 // We don't need to do young-gen spaces.
394 if (s->end() <= gen_boundary) return;
395 MemRegion used = s->used_region();
396
397 jbyte* cur_entry = byte_for(used.start());
398 jbyte* limit = byte_after(used.last());
399 while (cur_entry < limit) {
400 if (*cur_entry == CardTableModRefBS::clean_card) {
401 jbyte* first_dirty = cur_entry+1;
402 while (first_dirty < limit &&
403 *first_dirty == CardTableModRefBS::clean_card) {
404 first_dirty++;
405 }
|
86 return 0;
87 }
88
89 void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
90 // Parallel or sequential, we must always set the prev to equal the
91 // last one written.
92 if (parallel) {
93 // Find a parallel value to be used next.
94 jbyte next_val = find_unused_youngergenP_card_value();
95 set_cur_youngergen_card_val(next_val);
96
97 } else {
98 // In an sequential traversal we will always write youngergen, so that
99 // the inline barrier is correct.
100 set_cur_youngergen_card_val(youngergen_card);
101 }
102 }
103
104 void CardTableRS::younger_refs_iterate(Generation* g,
105 OopsInGenClosure* blk) {
106 // The indexing in this array is slightly odd. We want to access
107 // the old generation record here, which is at index 2.
108 _last_cur_val_in_gen[2] = cur_youngergen_card_val();
109 g->younger_refs_iterate(blk);
110 }
111
112 inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
113 if (_is_par) {
114 return clear_card_parallel(entry);
115 } else {
116 return clear_card_serial(entry);
117 }
118 }
119
120 inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
121 while (true) {
122 // In the parallel case, we may have to do this several times.
123 jbyte entry_val = *entry;
124 assert(entry_val != CardTableRS::clean_card_val(),
125 "We shouldn't be looking at clean cards, and this should "
126 "be the only place they get cleaned.");
127 if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
128 || _ct->is_prev_youngergen_card_val(entry_val)) {
290 assert(UseConcMarkSweepGC, "Tautology: see assert above");
291 warning("CMS+ParNew: Did you forget to call save_marks()? "
292 "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
293 "[" PTR_FORMAT ", " PTR_FORMAT ")",
294 p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
295 MemRegion ur2 = sp->used_region();
296 MemRegion urasm2 = sp->used_region_at_save_marks();
297 if (!ur.equals(ur2)) {
298 warning("CMS+ParNew: Flickering used_region()!!");
299 }
300 if (!urasm.equals(urasm2)) {
301 warning("CMS+ParNew: Flickering used_region_at_save_marks()!!");
302 }
303 ShouldNotReachHere();
304 }
305 #endif
306 _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
307 }
308
// Reset (to clean) the card table entries covering old_gen's
// previously used region.
void CardTableRS::clear_into_younger(Generation* old_gen) {
  assert(old_gen == GenCollectedHeap::heap()->old_gen(),
         "Should only be called for the old generation");
  // The card tables for the youngest gen need never be cleared.
  // There's a bit of subtlety in the clear() and invalidate()
  // methods that we exploit here and in invalidate_or_clear()
  // below to avoid missing cards at the fringes. If clear() or
  // invalidate() are changed in the future, this code should
  // be revisited. 20040107.ysr
  // NOTE(review): uses prev_used_region() rather than used_region() —
  // presumably so cards for space vacated by the last collection are
  // covered as well; confirm against callers.
  clear(old_gen->prev_used_region());
}
320
// Dirty (invalidate) cards for the occupied part of old_gen and clean
// (clear) cards for the part that was used before but is unoccupied now.
void CardTableRS::invalidate_or_clear(Generation* old_gen) {
  assert(old_gen == GenCollectedHeap::heap()->old_gen(),
         "Should only be called for the old generation");
  // Invalidate the cards for the currently occupied part of
  // the old generation and clear the cards for the
  // unoccupied part of the generation (if any, making use
  // of that generation's prev_used_region to determine that
  // region). No need to do anything for the youngest
  // generation. Also see note#20040107.ysr above.
  MemRegion used_mr = old_gen->used_region();
  // The region that was in use before but no longer is: previously
  // used minus currently used.
  MemRegion to_be_cleared_mr = old_gen->prev_used_region().minus(used_mr);
  if (!to_be_cleared_mr.is_empty()) {
    clear(to_be_cleared_mr);
  }
  invalidate(used_mr);
}
337
338
339 class VerifyCleanCardClosure: public OopClosure {
340 private:
341 HeapWord* _boundary;
342 HeapWord* _begin;
343 HeapWord* _end;
369 virtual void do_oop(oop* p) { VerifyCleanCardClosure::do_oop_work(p); }
370 virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); }
371 };
372
373 class VerifyCTSpaceClosure: public SpaceClosure {
374 private:
375 CardTableRS* _ct;
376 HeapWord* _boundary;
377 public:
378 VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) :
379 _ct(ct), _boundary(boundary) {}
380 virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); }
381 };
382
383 class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
384 CardTableRS* _ct;
385 public:
386 VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
387 void do_generation(Generation* gen) {
388 // Skip the youngest generation.
389 if (gen == GenCollectedHeap::heap()->young_gen()) {
390 return;
391 }
392 // Normally, we're interested in pointers to younger generations.
393 VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
394 gen->space_iterate(&blk, true);
395 }
396 };
397
398 void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
399 // We don't need to do young-gen spaces.
400 if (s->end() <= gen_boundary) return;
401 MemRegion used = s->used_region();
402
403 jbyte* cur_entry = byte_for(used.start());
404 jbyte* limit = byte_after(used.last());
405 while (cur_entry < limit) {
406 if (*cur_entry == CardTableModRefBS::clean_card) {
407 jbyte* first_dirty = cur_entry+1;
408 while (first_dirty < limit &&
409 *first_dirty == CardTableModRefBS::clean_card) {
410 first_dirty++;
411 }
|