120 // are also concurrently trying to allocate into it, we typically
121 // allocate a dummy object at the end of the region to ensure that
122 // no more allocations can take place in it. However, sometimes we
123 // want to know where the end of the last "real" object we allocated
124 // into the region was and this is what this field keeps track of.
125 HeapWord* _pre_dummy_top;
126
127 public:
128 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
129 MemRegion mr);
130
// Direct, unchecked accessors for the space's allocation top (_top).
// set_top performs no validation; callers are responsible for keeping
// the invariant bottom() <= top() <= end() -- not enforced here.
131   void set_top(HeapWord* value) { _top = value; }
132   HeapWord* top() const { return _top; }
133
134 protected:
135 // Reset the G1OffsetTableContigSpace.
136 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
137
138 HeapWord** top_addr() { return &_top; }
139 // Allocation helpers (return NULL if full).
140 inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
141 inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
142
143 public:
144 void reset_after_compaction() { set_top(compaction_top()); }
145
// Space accounting, derived from the [bottom, end) bounds and the
// current allocation top:
//   used() -- bytes in [bottom, top), i.e. already allocated
//   free() -- bytes in [top, end),   i.e. still available
146   size_t used() const { return byte_size(bottom(), top()); }
147   size_t free() const { return byte_size(top(), end()); }
// A block is considered free iff it lies at or above the allocation top.
148   bool is_free_block(const HeapWord* p) const { return p >= top(); }
149
// The sub-region [bottom, top) that currently holds allocated data.
150   MemRegion used_region() const { return MemRegion(bottom(), top()); }
151
152 void object_iterate(ObjectClosure* blk);
153 void safe_object_iterate(ObjectClosure* blk);
154
155 void set_bottom(HeapWord* value);
156 void set_end(HeapWord* value);
157
158 HeapWord* scan_top() const;
159 void record_timestamp();
160 void reset_gc_time_stamp() { _gc_time_stamp = 0; }
161 unsigned get_gc_time_stamp() { return _gc_time_stamp; }
162 void record_retained_region();
163
164 // See the comment above in the declaration of _pre_dummy_top for an
165 // explanation of what it is.
// Record the end of the last "real" (non-dummy) object allocated into
// this space. Precondition (asserted): the value lies within the space
// and at or below the current allocation top.
166   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
167     assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
168     _pre_dummy_top = pre_dummy_top;
169   }
// End of the last real object, or top() when no pre-dummy top has been
// recorded (_pre_dummy_top == NULL).
170   HeapWord* pre_dummy_top() {
171     return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
172   }
// Forget any recorded pre-dummy top; pre_dummy_top() reverts to top().
173   void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
174
175 virtual void clear(bool mangle_space);
176
177 HeapWord* block_start(const void* p);
178 HeapWord* block_start_const(const void* p) const;
179
180 // Add offset table update.
181 virtual HeapWord* allocate(size_t word_size);
182 HeapWord* par_allocate(size_t word_size);
183
184 HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
185
186 // MarkSweep support phase3
187 virtual HeapWord* initialize_threshold();
188 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
189
190 virtual void print() const;
191
// Reset the block offset table (BOT) backing this space; delegates to
// the shared offset array wrapper.
192   void reset_bot() {
193     _offsets.reset_bot();
194   }
195
// Print the block offset table to the given stream (debugging aid).
196   void print_bot_on(outputStream* out) {
197     _offsets.print_on(out);
198   }
199 };
200
201 class HeapRegion: public G1OffsetTableContigSpace {
202 friend class VMStructs;
333 static size_t max_region_size();
334
335 // It sets up the heap region size (GrainBytes / GrainWords), as
336 // well as other related fields that are based on the heap region
337 // size (LogOfHRGrainBytes / LogOfHRGrainWords /
338 // CardsPerRegion). All those fields are considered constant
339 // throughout the JVM's execution, therefore they should only be set
340 // up once during initialization time.
341 static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
342
343 // All allocated blocks are occupied by objects in a HeapRegion
344 bool block_is_obj(const HeapWord* p) const;
345
346 // Returns the object size for all valid block starts
347 // and the amount of unallocated words if called on top()
348 size_t block_size(const HeapWord* p) const;
349
350 // Override for scan_and_forward support.
351 void prepare_for_compaction(CompactPoint* cp);
352
353 inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
354 inline HeapWord* allocate_no_bot_updates(size_t word_size);
355
356 // If this region is a member of a HeapRegionManager, the index in that
357 // sequence, otherwise -1.
358 uint hrm_index() const { return _hrm_index; }
359
360 // The number of bytes marked live in the region in the last marking phase.
361 size_t marked_bytes() { return _prev_marked_bytes; }
362 size_t live_bytes() {
363 return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
364 }
365
366 // The number of bytes counted in the next marking.
367 size_t next_marked_bytes() { return _next_marked_bytes; }
368 // The number of bytes live wrt the next marking.
369 size_t next_live_bytes() {
370 return
371 (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
372 }
373
374 // A lower bound on the amount of garbage bytes in the region.
120 // are also concurrently trying to allocate into it, we typically
121 // allocate a dummy object at the end of the region to ensure that
122 // no more allocations can take place in it. However, sometimes we
123 // want to know where the end of the last "real" object we allocated
124 // into the region was and this is what this field keeps track of.
125 HeapWord* _pre_dummy_top;
126
127 public:
128 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
129 MemRegion mr);
130
// Direct, unchecked accessors for the space's allocation top (_top).
// set_top performs no validation; callers are responsible for keeping
// the invariant bottom() <= top() <= end() -- not enforced here.
131   void set_top(HeapWord* value) { _top = value; }
132   HeapWord* top() const { return _top; }
133
134 protected:
135 // Reset the G1OffsetTableContigSpace.
136 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
137
138 HeapWord** top_addr() { return &_top; }
139 // Allocation helpers (return NULL if full).
140 inline HeapWord* allocate_impl(size_t min_word_size, size_t& word_size, HeapWord* end_value);
141 inline HeapWord* par_allocate_impl(size_t min_word_size, size_t& word_size, HeapWord* end_value);
142
143 public:
144 void reset_after_compaction() { set_top(compaction_top()); }
145
// Space accounting, derived from the [bottom, end) bounds and the
// current allocation top:
//   used() -- bytes in [bottom, top), i.e. already allocated
//   free() -- bytes in [top, end),   i.e. still available
146   size_t used() const { return byte_size(bottom(), top()); }
147   size_t free() const { return byte_size(top(), end()); }
// A block is considered free iff it lies at or above the allocation top.
148   bool is_free_block(const HeapWord* p) const { return p >= top(); }
149
// The sub-region [bottom, top) that currently holds allocated data.
150   MemRegion used_region() const { return MemRegion(bottom(), top()); }
151
152 void object_iterate(ObjectClosure* blk);
153 void safe_object_iterate(ObjectClosure* blk);
154
155 void set_bottom(HeapWord* value);
156 void set_end(HeapWord* value);
157
158 HeapWord* scan_top() const;
159 void record_timestamp();
160 void reset_gc_time_stamp() { _gc_time_stamp = 0; }
161 unsigned get_gc_time_stamp() { return _gc_time_stamp; }
162 void record_retained_region();
163
164 // See the comment above in the declaration of _pre_dummy_top for an
165 // explanation of what it is.
// Record the end of the last "real" (non-dummy) object allocated into
// this space. Precondition (asserted): the value lies within the space
// and at or below the current allocation top.
166   void set_pre_dummy_top(HeapWord* pre_dummy_top) {
167     assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
168     _pre_dummy_top = pre_dummy_top;
169   }
// End of the last real object, or top() when no pre-dummy top has been
// recorded (_pre_dummy_top == NULL).
170   HeapWord* pre_dummy_top() {
171     return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
172   }
// Forget any recorded pre-dummy top; pre_dummy_top() reverts to top().
173   void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
174
175 virtual void clear(bool mangle_space);
176
177 HeapWord* block_start(const void* p);
178 HeapWord* block_start_const(const void* p) const;
179
180 // Allocation (return NULL if full). Assumes the caller has established
181 // mutually exclusive access to the space.
182 virtual HeapWord* allocate(size_t min_word_size, size_t& word_size);
183 // Allocation (return NULL if full). Enforces mutual exclusion internally.
184 HeapWord* par_allocate(size_t min_word_size, size_t& word_size);
185
// Fixed-size convenience overloads: request exactly word_size words by
// delegating to the (min_word_size, size_t& word_size) variants with
// min == desired. The parameter word_size doubles as the by-reference
// actual-size out-argument; its updated value is discarded on return.
186   virtual HeapWord* allocate(size_t word_size) { return allocate(word_size, word_size); }
187   virtual HeapWord* par_allocate(size_t word_size) { return par_allocate(word_size, word_size); }
188
189 HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
190
191 // MarkSweep support phase3
192 virtual HeapWord* initialize_threshold();
193 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
194
195 virtual void print() const;
196
// Reset the block offset table (BOT) backing this space; delegates to
// the shared offset array wrapper.
197   void reset_bot() {
198     _offsets.reset_bot();
199   }
200
// Print the block offset table to the given stream (debugging aid).
201   void print_bot_on(outputStream* out) {
202     _offsets.print_on(out);
203   }
204 };
205
206 class HeapRegion: public G1OffsetTableContigSpace {
207 friend class VMStructs;
338 static size_t max_region_size();
339
340 // It sets up the heap region size (GrainBytes / GrainWords), as
341 // well as other related fields that are based on the heap region
342 // size (LogOfHRGrainBytes / LogOfHRGrainWords /
343 // CardsPerRegion). All those fields are considered constant
344 // throughout the JVM's execution, therefore they should only be set
345 // up once during initialization time.
346 static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
347
348 // All allocated blocks are occupied by objects in a HeapRegion
349 bool block_is_obj(const HeapWord* p) const;
350
351 // Returns the object size for all valid block starts
352 // and the amount of unallocated words if called on top()
353 size_t block_size(const HeapWord* p) const;
354
355 // Override for scan_and_forward support.
356 void prepare_for_compaction(CompactPoint* cp);
357
358 inline HeapWord* par_allocate_no_bot_updates(size_t min_word_size, size_t& word_size);
359 inline HeapWord* allocate_no_bot_updates(size_t word_size) { return allocate_no_bot_updates(word_size, word_size); }
360 inline HeapWord* allocate_no_bot_updates(size_t min_word_size, size_t& word_size);
361
362 // If this region is a member of a HeapRegionManager, the index in that
363 // sequence, otherwise -1.
364 uint hrm_index() const { return _hrm_index; }
365
366 // The number of bytes marked live in the region in the last marking phase.
367 size_t marked_bytes() { return _prev_marked_bytes; }
368 size_t live_bytes() {
369 return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
370 }
371
372 // The number of bytes counted in the next marking.
373 size_t next_marked_bytes() { return _next_marked_bytes; }
374 // The number of bytes live wrt the next marking.
375 size_t next_live_bytes() {
376 return
377 (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
378 }
379
380 // A lower bound on the amount of garbage bytes in the region.