44 //
45 // The first thing the G1 post write barrier does is to check whether
46 // the card containing the updated pointer is already dirty and, if
47 // so, skips the remaining code in the barrier.
48 //
49 // Delaying the refinement of a card keeps that card dirty, so a
50 // subsequent write to the same card passes the is_dirty check and
51 // skips the remainder of the write barrier.
52 //
53 // This can significantly reduce the overhead of the write barrier
54 // code, increasing throughput.
55
56 class G1HotCardCache: public CHeapObj<mtGC> {
57
58 G1CollectedHeap* _g1h;
59
60 bool _use_cache;
61
62 G1CardCounts _card_counts;
63
64 // The card cache table
65 jbyte** _hot_cache;
66
67 size_t _hot_cache_size;
68
69 int _hot_cache_par_chunk_size;
70
71 // Avoids false sharing when concurrently updating _hot_cache_idx or
72 // _hot_cache_par_claimed_idx. These two fields are never updated at
73 // the same time, so there is no need to pad between them as well.
74 char _pad_before[DEFAULT_CACHE_LINE_SIZE];
75
76 volatile size_t _hot_cache_idx;
77
78 volatile size_t _hot_cache_par_claimed_idx;
79
80 char _pad_after[DEFAULT_CACHE_LINE_SIZE];
81
82 // The number of cached cards a thread claims when flushing the cache
83 static const int ClaimChunkSize = 32;
|
44 //
45 // The first thing the G1 post write barrier does is to check whether
46 // the card containing the updated pointer is already dirty and, if
47 // so, skips the remaining code in the barrier.
48 //
49 // Delaying the refinement of a card keeps that card dirty, so a
50 // subsequent write to the same card passes the is_dirty check and
51 // skips the remainder of the write barrier.
52 //
53 // This can significantly reduce the overhead of the write barrier
54 // code, increasing throughput.
55
56 class G1HotCardCache: public CHeapObj<mtGC> {
57
58 G1CollectedHeap* _g1h;
59
60 bool _use_cache;
61
62 G1CardCounts _card_counts;
63
64 ArrayAllocator<jbyte*, mtGC> _hot_cache_memory;
65
66 // The card cache table
67 jbyte** _hot_cache;
68
69 size_t _hot_cache_size;
70
71 int _hot_cache_par_chunk_size;
72
73 // Avoids false sharing when concurrently updating _hot_cache_idx or
74 // _hot_cache_par_claimed_idx. These two fields are never updated at
75 // the same time, so there is no need to pad between them as well.
76 char _pad_before[DEFAULT_CACHE_LINE_SIZE];
77
78 volatile size_t _hot_cache_idx;
79
80 volatile size_t _hot_cache_par_claimed_idx;
81
82 char _pad_after[DEFAULT_CACHE_LINE_SIZE];
83
84 // The number of cached cards a thread claims when flushing the cache
85 static const int ClaimChunkSize = 32;
|